par_relax_more.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * a few more relaxation schemes: Chebyshev, FCF-Jacobi, CG - * these do not go through the CF interface (hypre_BoomerAMGRelaxIF) * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "float.h"
/****************************************************************************** * * use Gershgorin discs to estimate smallest and largest eigenvalues * A is assumed to be symmetric * For SPD matrix, it returns [0, max_eig = max (aii + ri)], * ri is radius of disc centered at a_ii * For SND matrix, it returns [min_eig = min (aii - ri), 0] * * scale > 0: compute eigenvalue estimate of D^{-1/2}*A*D^{-1/2}, where * D = diag(A) for SPD matrix, D = -diag(A) for SND * * scale = 1: The algorithm is performed on D^{-1}*A, since it * has the same eigenvalues as D^{-1/2}*A*D^{-1/2} * scale = 2: The algorithm is performed on D^{-1/2}*A*D^{-1/2} (TODO) * *****************************************************************************/ HYPRE_Int hypre_ParCSRMaxEigEstimateHost( hypre_ParCSRMatrix *A, /* matrix to relax with */ HYPRE_Int scale, /* scale by diagonal? */ HYPRE_Real *max_eig, HYPRE_Real *min_eig ) { HYPRE_Int A_num_rows = hypre_ParCSRMatrixNumRows(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A)); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A)); HYPRE_Real *diag = NULL; HYPRE_Int i, j; HYPRE_Real e_max, e_min; HYPRE_Real send_buf[2], recv_buf[2]; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); if (scale > 1) { diag = hypre_TAlloc(HYPRE_Real, A_num_rows, memory_location); } for (i = 0; i < A_num_rows; i++) { HYPRE_Real a_ii = 0.0, r_i = 0.0, lower, upper; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { if (A_diag_j[j] == i) { a_ii = A_diag_data[j]; } else { r_i += hypre_abs(A_diag_data[j]); } } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { r_i += hypre_abs(A_offd_data[j]); } lower = a_ii - r_i; upper = a_ii + r_i; if (scale == 1) { lower /= hypre_abs(a_ii); upper /= hypre_abs(a_ii); } if (i) { e_max = hypre_max(e_max, upper); e_min = hypre_min(e_min, lower); } else { e_max = upper; e_min = lower; } } send_buf[0] = -e_min; send_buf[1] = e_max; /* get e_min and e_max across procs */ hypre_MPI_Allreduce(send_buf, recv_buf, 2, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); e_min = -recv_buf[0]; e_max = recv_buf[1]; /* return */ if ( hypre_abs(e_min) > hypre_abs(e_max) ) { *min_eig = e_min; *max_eig = hypre_min(0.0, e_max); } else { *min_eig = hypre_max(e_min, 0.0); *max_eig = e_max; } hypre_TFree(diag, memory_location); return hypre_error_flag; }
/** * @brief Estimates the max eigenvalue using infinity norm. Will determine * whether or not to use host or device internally * * @param[in] A Matrix to relax with * @param[in] scale Whether to scale by the diagonal * @param[out] max_eig Estimated max eigenvalue * @param[out] min_eig Estimated min eigenvalue */ HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */ HYPRE_Int scale, /* scale by diagonal?*/ HYPRE_Real *max_eig, HYPRE_Real *min_eig) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("ParCSRMaxEigEstimate"); #endif HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); HYPRE_Int ierr = 0; if (exec == HYPRE_EXEC_HOST) { ierr = hypre_ParCSRMaxEigEstimateHost(A, scale, max_eig, min_eig); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) else { ierr = hypre_ParCSRMaxEigEstimateDevice(A, scale, max_eig, min_eig); } #endif #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; }
/** * @brief Uses CG to get the eigenvalue estimate. Will determine whether to use * host or device internally * * @param[in] A Matrix to relax with * @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2} * @param[in] max_iter Maximum number of iterations for CG * @param[out] max_eig Estimated max eigenvalue * @param[out] min_eig Estimated min eigenvalue */ HYPRE_Int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */ HYPRE_Int scale, /* scale by diagonal?*/ HYPRE_Int max_iter, HYPRE_Real *max_eig, HYPRE_Real *min_eig) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("ParCSRMaxEigEstimateCG"); #endif HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(A)); HYPRE_Int ierr = 0; if (exec == HYPRE_EXEC_HOST) { ierr = hypre_ParCSRMaxEigEstimateCGHost(A, scale, max_iter, max_eig, min_eig); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) else { ierr = hypre_ParCSRMaxEigEstimateCGDevice(A, scale, max_iter, max_eig, min_eig); } #endif #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; }
/** * @brief Uses CG to get the eigenvalue estimate on the host * * @param[in] A Matrix to relax with * @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2} * @param[in] max_iter Maximum number of iterations for CG * @param[out] max_eig Estimated max eigenvalue * @param[out] min_eig Estimated min eigenvalue */ HYPRE_Int hypre_ParCSRMaxEigEstimateCGHost( hypre_ParCSRMatrix *A, /* matrix to relax with */ HYPRE_Int scale, /* scale by diagonal?*/ HYPRE_Int max_iter, HYPRE_Real *max_eig, HYPRE_Real *min_eig ) { HYPRE_Int i, j, err; hypre_ParVector *p; hypre_ParVector *s; hypre_ParVector *r; hypre_ParVector *ds; hypre_ParVector *u; HYPRE_Real *tridiag = NULL; HYPRE_Real *trioffd = NULL; HYPRE_Real lambda_max; HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv; HYPRE_Real lambda_min; HYPRE_Real *s_data, *p_data, *ds_data, *u_data; HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* check the size of A - don't iterate more than the size */ HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A); if (size < (HYPRE_BigInt) max_iter) { max_iter = (HYPRE_Int) size; } /* create some temp vectors: p, s, r, ds, u */ r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(r); p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p); s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(s); ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(ds); u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(u); /* point to local data */ s_data = hypre_VectorData(hypre_ParVectorLocalVector(s)); p_data = hypre_VectorData(hypre_ParVectorLocalVector(p)); ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds)); u_data = hypre_VectorData(hypre_ParVectorLocalVector(u)); /* make room for tri-diag matrix */ tridiag = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST); trioffd = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST); for (i = 0; i < max_iter + 1; i++) { tridiag[i] = 0; trioffd[i] = 0; } /* set residual to random */ hypre_ParVectorSetRandomValues(r, 1); if (scale) { hypre_CSRMatrixExtractDiagonal(hypre_ParCSRMatrixDiag(A), ds_data, 4); } else { /* set ds to 1 */ hypre_ParVectorSetConstantValues(ds, 1.0); } /* gamma = <r,Cr> */ gamma = hypre_ParVectorInnerProd(r, p); /* for the initial filling of the tridiag matrix */ beta = 1.0; i = 0; while (i < max_iter) { /* s = C*r */ /* TO DO: C = diag scale */ hypre_ParVectorCopy(r, s); /*gamma = <r,Cr> */ gamma_old = gamma; gamma = hypre_ParVectorInnerProd(r, s); if (i == 0) { beta = 1.0; /* p_0 = C*r */ hypre_ParVectorCopy(s, p); } else { /* beta = gamma / gamma_old */ beta = gamma / gamma_old; /* p = s + beta p */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < local_size; j++) { p_data[j] = s_data[j] + beta * p_data[j]; } } if (scale) { /* s = D^{-1/2}A*D^{-1/2}*p */ for (j = 0; j < local_size; j++) { u_data[j] = ds_data[j] * p_data[j]; } hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s); for (j = 0; j < local_size; j++) { s_data[j] = ds_data[j] * s_data[j]; } } else { /* s = A*p */ hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s); } /* <s,p> */ sdotp = hypre_ParVectorInnerProd(s, p); /* alpha = gamma / <s,p> */ alpha = gamma / sdotp; /* get tridiagonal matrix */ alphainv = 1.0 / alpha; tridiag[i + 1] = alphainv; tridiag[i] *= beta; tridiag[i] += alphainv; trioffd[i + 1] = alphainv; trioffd[i] *= sqrt(beta); /* x = x + alpha*p */ /* don't need */ /* r = r - alpha*s */ hypre_ParVectorAxpy( -alpha, s, r); i++; } /* eispack routine - eigenvalues return in tridiag and ordered*/ hypre_LINPACKcgtql1(&i, tridiag, trioffd, &err); lambda_max = tridiag[i - 1]; lambda_min = tridiag[0]; /* hypre_printf("linpack max eig est = %g\n", lambda_max);*/ /* hypre_printf("linpack min eig est = %g\n", lambda_min);*/ hypre_TFree(tridiag, HYPRE_MEMORY_HOST); hypre_TFree(trioffd, HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(r); hypre_ParVectorDestroy(s); hypre_ParVectorDestroy(p); hypre_ParVectorDestroy(ds); hypre_ParVectorDestroy(u); /* return */ *max_eig = lambda_max; *min_eig = lambda_min; return hypre_error_flag; } /****************************************************************************** Chebyshev relaxation Can specify order 1-4 (this is the order of the resid polynomial)- here we explicitly code the coefficients (instead of iteratively determining) variant 0: standard chebyshev this is rlx 11 if scale = 0, and 16 if scale == 1 variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t) this is rlx 15 if scale = 0, and 17 if 
scale == 1 ratio indicates the percentage of the whole spectrum to use (so .5 means half, and .1 means 10 percent) *******************************************************************************/ HYPRE_Int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */ hypre_ParVector *f, /* right-hand side */ HYPRE_Real max_eig, HYPRE_Real min_eig, HYPRE_Real fraction, HYPRE_Int order, /* polynomial order */ HYPRE_Int scale, /* scale by diagonal?*/ HYPRE_Int variant, hypre_ParVector *u, /* initial/updated approximation */ hypre_ParVector *v, /* temporary vector */ hypre_ParVector *r /* another temp vector */) { HYPRE_Real *coefs = NULL; HYPRE_Real *ds_data = NULL; hypre_ParVector *tmp_vec = NULL; hypre_ParVector *orig_u_vec = NULL; hypre_ParCSRRelax_Cheby_Setup(A, max_eig, min_eig, fraction, order, scale, variant, &coefs, &ds_data); orig_u_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize_v2(orig_u_vec, hypre_ParCSRMatrixMemoryLocation(A)); if (scale) { tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize_v2(tmp_vec, hypre_ParCSRMatrixMemoryLocation(A)); } hypre_ParCSRRelax_Cheby_Solve(A, f, ds_data, coefs, order, scale, variant, u, v, r, orig_u_vec, tmp_vec); hypre_TFree(ds_data, hypre_ParCSRMatrixMemoryLocation(A)); hypre_TFree(coefs, HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(orig_u_vec); hypre_ParVectorDestroy(tmp_vec); return hypre_error_flag; }
/*-------------------------------------------------------------------------- * CG Smoother *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int num_its) { HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */ HYPRE_PCGSetTol(solver, 0.0); /* convergence tolerance */ HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u); #if 0 { HYPRE_Int myid; HYPRE_Int num_iterations; HYPRE_Real final_res_norm; hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid); HYPRE_PCGGetNumIterations(solver, &num_iterations); HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm); if (myid == 0) { hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations); hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm); } } #endif return hypre_error_flag; }
/* tql1.f -- this is the EISPACK translation - from Barry Smith in PETSc Note that this routine always uses real numbers (not complex) even if the underlying matrix is Hermitian. This is because the Lanczos process applied to Hermitian matrices always produces a real, symmetric tridiagonal matrix. */ HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n, HYPRE_Real *d, HYPRE_Real *e, HYPRE_Int *ierr) { /* System generated locals */ HYPRE_Int i__1, i__2; HYPRE_Real d__1, d__2, c_b10 = 1.0; /* Local variables */ HYPRE_Real c, f, g, h; HYPRE_Int i, j, l, m; HYPRE_Real p, r, s, c2, c3 = 0.0; HYPRE_Int l1, l2; HYPRE_Real s2 = 0.0; HYPRE_Int ii; HYPRE_Real dl1, el1; HYPRE_Int mml; HYPRE_Real tst1, tst2; /* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */ /* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */ /* WILKINSON. */ /* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */ /* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */ /* TRIDIAGONAL MATRIX BY THE QL METHOD.
*/ /* ON INPUT */ /* N IS THE ORDER OF THE MATRIX. */ /* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */ /* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */ /* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */ /* ON OUTPUT */ /* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */ /* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */ /* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */ /* THE SMALLEST EIGENVALUES. */ /* E HAS BEEN DESTROYED. */ /* IERR IS SET TO */ /* ZERO FOR NORMAL RETURN, */ /* J IF THE J-TH EIGENVALUE HAS NOT BEEN */ /* DETERMINED AFTER 30 ITERATIONS. */ /* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */ /* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */ /* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY */ /* THIS VERSION DATED AUGUST 1983. */ /* ------------------------------------------------------------------ */ HYPRE_Real ds; --e; --d; *ierr = 0; if (*n == 1) { goto L1001; } i__1 = *n; for (i = 2; i <= i__1; ++i) { e[i - 1] = e[i]; } f = 0.; tst1 = 0.; e[*n] = 0.; i__1 = *n; for (l = 1; l <= i__1; ++l) { j = 0; h = (d__1 = d[l], fabs(d__1)) + (d__2 = e[l], fabs(d__2)); if (tst1 < h) { tst1 = h; } /* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */ i__2 = *n; for (m = l; m <= i__2; ++m) { tst2 = tst1 + (d__1 = e[m], fabs(d__1)); if (tst2 == tst1) { goto L120; } /* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */ /* THROUGH THE BOTTOM OF THE LOOP .......... */ } L120: if (m == l) { goto L210; } L130: if (j == 30) { goto L1000; } ++j; /* .......... FORM SHIFT .......... */ l1 = l + 1; l2 = l1 + 1; g = d[l]; p = (d[l1] - g) / (e[l] * 2.); r = hypre_LINPACKcgpthy(&p, &c_b10); ds = 1.0; if (p < 0.0) { ds = -1.0; } d[l] = e[l] / (p + ds * r); d[l1] = e[l] * (p + ds * r); dl1 = d[l1]; h = g - d[l]; if (l2 > *n) { goto L145; } i__2 = *n; for (i = l2; i <= i__2; ++i) { d[i] -= h; } L145: f += h; /* .......... QL TRANSFORMATION .......... */ p = d[m]; c = 1.; c2 = c; el1 = e[l1]; s = 0.; mml = m - l; /* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */ i__2 = mml; for (ii = 1; ii <= i__2; ++ii) { c3 = c2; c2 = c; s2 = s; i = m - ii; g = c * e[i]; h = c * p; r = hypre_LINPACKcgpthy(&p, &e[i]); e[i + 1] = s * r; s = e[i] / r; c = p / r; p = c * d[i] - s * g; d[i + 1] = h + s * (c * g + s * d[i]); } p = -s * s2 * c3 * el1 * e[l] / dl1; e[l] = s * p; d[l] = c * p; tst2 = tst1 + (d__1 = e[l], fabs(d__1)); if (tst2 > tst1) { goto L130; } L210: p = d[l] + f; /* .......... ORDER EIGENVALUES .......... */ if (l == 1) { goto L250; } /* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */ i__2 = l; for (ii = 2; ii <= i__2; ++ii) { i = l + 2 - ii; if (p >= d[i - 1]) { goto L270; } d[i] = d[i - 1]; } L250: i = 1; L270: d[i] = p; } goto L1001; /* .......... SET ERROR -- NO CONVERGENCE TO AN */ /* EIGENVALUE AFTER 30 ITERATIONS .......... */ L1000: *ierr = l; L1001: return 0; } /* cgtql1_ */ HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a, HYPRE_Real *b) { /* System generated locals */ HYPRE_Real ret_val, d__1, d__2, d__3; /* Local variables */ HYPRE_Real p, r, s, t, u; /* FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW */ /* Computing MAX */ d__1 = fabs(*a), d__2 = fabs(*b); p = hypre_max(d__1, d__2); if (!p) { goto L20; } /* Computing MIN */ d__2 = fabs(*a), d__3 = fabs(*b); /* Computing 2nd power */ d__1 = hypre_min(d__2, d__3) / p; r = d__1 * d__1; L10: t = r + 4.; if (t == 4.) { goto L20; } s = r / t; u = s * 2. 
+ 1.; p = u * p; /* Computing 2nd power */ d__1 = s / u; r = d__1 * d__1 * r; goto L10; L20: ret_val = p; return ret_val; } /* cgpthy_ */
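/* Usage sketch (illustrative, not part of the original file): how the routines above are typically chained -- estimate the spectrum of A with a few CG iterations, then run one Chebyshev relaxation sweep. The helper name and the parameter choices (10 CG iterations, order 2, 30% of the spectrum) are assumptions for illustration only; A, f, u, v, r are expected to be fully constructed ParCSR objects. */
static HYPRE_Int example_cheby_sweep( hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, hypre_ParVector *v, hypre_ParVector *r )
{
   HYPRE_Real max_eig = 0.0, min_eig = 0.0;
   /* CG-based estimate of the extreme eigenvalues of D^{-1/2} A D^{-1/2} */
   hypre_ParCSRMaxEigEstimateCG(A, 1 /* scale */, 10 /* max_iter */, &max_eig, &min_eig);
   /* standard (variant 0), diagonally scaled, order-2 Chebyshev smoothing
    * targeting the upper 30% of the estimated spectrum */
   return hypre_ParCSRRelax_Cheby(A, f, max_eig, min_eig, 0.3 /* fraction */, 2 /* order */, 1 /* scale */, 0 /* variant */, u, v, r);
}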
image_pyramid.h
/* * * This file is part of the open-source SeetaFace engine, which includes three * modules: * SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification. * * This file is part of the SeetaFace Detection module, containing codes * implementing the * face detection method described in the following paper: * * * Funnel-structured cascade for multi-view face detection with alignment * awareness, * Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen. * In Neurocomputing (under review) * * * Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group, * Institute of Computing Technology, Chinese Academy of Sciences, Beijing, * China. * * The codes are mainly developed by Shuzhe Wu (a Ph.D. student supervised by Prof. * Shiguang Shan) * * As an open-source face recognition engine: you can redistribute SeetaFace * source codes * and/or modify it under the terms of the BSD 2-Clause License. * * You should have received a copy of the BSD 2-Clause License along with the * software. * If not, see <https://opensource.org/licenses/BSD-2-Clause>. * * Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any * problems. * * Note: the above information must be kept whenever or wherever the codes are * used. * */
#ifndef SEETA_FD_UTIL_IMAGE_PYRAMID_H_ #define SEETA_FD_UTIL_IMAGE_PYRAMID_H_ #include <cstdint> #include <string> #include <cstring> #include "common.h" namespace seeta { namespace fd { static void ResizeImage(const seeta::ImageData& src, seeta::ImageData* dest) { int32_t src_width = src.width; int32_t src_height = src.height; int32_t dest_width = dest->width; int32_t dest_height = dest->height; if (src_width == dest_width && src_height == dest_height) { std::memcpy(dest->data, src.data, src_width * src_height * sizeof(uint8_t)); return; } double lf_x_scl = static_cast<double>(src_width) / dest_width; double lf_y_scl = static_cast<double>(src_height) / dest_height; const uint8_t* src_data = src.data; uint8_t* dest_data = dest->data; #pragma omp parallel num_threads(SEETA_NUM_THREADS) { #pragma omp for nowait for (int32_t y = 0; y < dest_height; y++) { for (int32_t x = 0; x < dest_width; x++) { double lf_x_s = lf_x_scl * x; double lf_y_s = lf_y_scl * y; int32_t n_x_s = static_cast<int>(lf_x_s); n_x_s = (n_x_s <= (src_width - 2) ? n_x_s : (src_width - 2)); int32_t n_y_s = static_cast<int>(lf_y_s); n_y_s = (n_y_s <= (src_height - 2) ?
n_y_s : (src_height - 2)); double lf_weight_x = lf_x_s - n_x_s; double lf_weight_y = lf_y_s - n_y_s; double dest_val = (1 - lf_weight_y) * ((1 - lf_weight_x) * src_data[n_y_s * src_width + n_x_s] + lf_weight_x * src_data[n_y_s * src_width + n_x_s + 1]) + lf_weight_y * ((1 - lf_weight_x) * src_data[(n_y_s + 1) * src_width + n_x_s] + lf_weight_x * src_data[(n_y_s + 1) * src_width + n_x_s + 1]); dest_data[y * dest_width + x] = static_cast<uint8_t>(dest_val); } } } } class ImagePyramid { public: ImagePyramid() : max_scale_(1.0f), min_scale_(1.0f), scale_factor_(1.0f), scale_step_(0.8f), width1x_(0), height1x_(0), width_scaled_(0), height_scaled_(0), buf_img_width_(2), buf_img_height_(2), buf_scaled_width_(2), buf_scaled_height_(2) { buf_img_ = new uint8_t[buf_img_width_ * buf_img_height_]; buf_img_scaled_ = new uint8_t[buf_scaled_width_ * buf_scaled_height_]; } ~ImagePyramid() { delete[] buf_img_; buf_img_ = nullptr; buf_img_width_ = 0; buf_img_height_ = 0; delete[] buf_img_scaled_; buf_img_scaled_ = nullptr; buf_scaled_width_ = 0; buf_scaled_height_ = 0; img_scaled_.data = nullptr; img_scaled_.width = 0; img_scaled_.height = 0; } inline void SetScaleStep(float step) { if (step > 0.0f && step <= 1.0f) scale_step_ = step; } inline void SetMinScale(float min_scale) { min_scale_ = min_scale; } inline void SetMaxScale(float max_scale) { max_scale_ = max_scale; scale_factor_ = max_scale; UpdateBufScaled(); } void SetImage1x(const uint8_t* img_data, int32_t width, int32_t height); inline float min_scale() const { return min_scale_; } inline float max_scale() const { return max_scale_; } inline seeta::ImageData image1x() { seeta::ImageData img(width1x_, height1x_, 1); img.data = buf_img_; return img; } const seeta::ImageData* GetNextScaleImage(float* scale_factor = nullptr); private: void UpdateBufScaled(); float max_scale_; float min_scale_; float scale_factor_; float scale_step_; int32_t width1x_; int32_t height1x_; int32_t width_scaled_; int32_t height_scaled_; uint8_t* buf_img_; int32_t buf_img_width_; int32_t buf_img_height_; uint8_t* buf_img_scaled_; int32_t buf_scaled_width_; int32_t buf_scaled_height_; seeta::ImageData img_scaled_; }; } // namespace fd } // namespace seeta #endif // SEETA_FD_UTIL_IMAGE_PYRAMID_H_
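/* Illustrative sketch (plain C, not part of the original header): the scale schedule the pyramid appears to follow, inferred from the member names -- scale_factor_ starts at max_scale and is multiplied by scale_step (0 < step <= 1) per level until it drops below min_scale, with the 1x image resized to the scaled size at each level. The concrete numbers below are hypothetical. */
#include <stdio.h>

int main(void)
{
    const float max_scale = 1.0f, min_scale = 0.1f, scale_step = 0.8f;
    const int width1x = 640, height1x = 480;   /* hypothetical 1x image size */

    float scale = max_scale;
    while (scale >= min_scale) {
        int w = (int)(width1x * scale);
        int h = (int)(height1x * scale);
        printf("pyramid level at scale %.3f -> %dx%d\n", scale, w, h);
        scale *= scale_step;                   /* geometric step per level */
    }
    return 0;
}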
common.h
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <cstdio> #include <string> #include <vector> #include <sstream> #include <cstdint> #include <algorithm> #include <cmath> #include <functional> #include <memory> #include <iterator> #include <type_traits> #include <iomanip> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' 
|| *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', 
'9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = char(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t ) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t ) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } 
return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char* p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char* p, T* out) const { double tmp = 0.0; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do in-place softmax transformation on p_rec * \param p_rec The input/output vector of the values.
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (size_t i = 0; i < input.size(); ++i) { ret.push_back(input.at(i).get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; for (size_t i = start; i < keys.size(); ++i) { arr.emplace_back(keys[i], values[i]); } if (!is_reverse) { std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { keys[i] = arr[i].first; values[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) { std::vector<T*> ptr(data.size()); for (size_t i = 0; i < data.size(); ++i) { ptr[i] = data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (x >= 1e300) { return 1e300; } else if(x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static,1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static,1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY); } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } } // namespace Common } // namespace LightGBM #endif // LIGHTGBM_UTILS_COMMON_FUN_H_
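/* Illustrative sketch (plain C, not part of the original header): the same 32-bit bitset encoding used by ConstructBitset/FindInBitset above -- value v lives in word v/32, at bit v%32. Self-contained demo with a fixed-size buffer; the real helpers grow a std::vector instead. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NWORDS 4   /* covers values 0..127 in this demo */

static void bitset_add(uint32_t *bits, int v)
{
    bits[v / 32] |= (uint32_t)1 << (v % 32);
}

static int bitset_test(const uint32_t *bits, int nwords, int v)
{
    int w = v / 32;
    if (w >= nwords) return 0;            /* out of range => not present */
    return (bits[w] >> (v % 32)) & 1;
}

int main(void)
{
    uint32_t bits[NWORDS];
    memset(bits, 0, sizeof(bits));
    bitset_add(bits, 3);
    bitset_add(bits, 64);
    printf("%d %d %d\n", bitset_test(bits, NWORDS, 3),
           bitset_test(bits, NWORDS, 64), bitset_test(bits, NWORDS, 65));
    /* prints: 1 1 0 */
    return 0;
}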
initialize.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------//
#include "header.h"
//--------------------------------------------------------------------- // This subroutine initializes the field variable u using // tri-linear transfinite interpolation of the boundary values //--------------------------------------------------------------------- void initialize() { int i, j, k, m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; #pragma omp parallel default(shared) \ private(i,j,k,m,zeta,eta,xi,ix,iy,iz,Pxi,Peta,Pzeta,Pface,temp) { //--------------------------------------------------------------------- // Later (in compute_rhs) we compute 1/u for every element. A few of // the corner elements are not used, but it is convenient (and faster) // to compute the whole thing with a simple loop. Make sure those // values are nonzero by initializing the whole thing here.
//--------------------------------------------------------------------- #pragma omp for schedule(static) for (k = 0; k <= grid_points[2]-1; k++) { for (j = 0; j <= grid_points[1]-1; j++) { for (i = 0; i <= grid_points[0]-1; i++) { u[k][j][i][0] = 1.0; u[k][j][i][1] = 0.0; u[k][j][i][2] = 0.0; u[k][j][i][3] = 0.0; u[k][j][i][4] = 1.0; } } } //--------------------------------------------------------------------- // first store the "interpolated" values everywhere on the grid //--------------------------------------------------------------------- #pragma omp for schedule(static) nowait for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (ix = 0; ix < 2; ix++) { Pxi = (double)ix; exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]); } for (iy = 0; iy < 2; iy++) { Peta = (double)iy; exact_solution(xi, Peta, zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { Pzeta = (double)iz; exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[k][j][i][m] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } //--------------------------------------------------------------------- // now store the exact values on the boundaries //--------------------------------------------------------------------- //--------------------------------------------------------------------- // west face //--------------------------------------------------------------------- xi = 0.0; i = 0; #pragma omp for schedule(static) nowait for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // east face //--------------------------------------------------------------------- xi = 1.0; i = grid_points[0]-1; #pragma omp for schedule(static) nowait for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // south face //--------------------------------------------------------------------- eta = 0.0; j = 0; #pragma omp for schedule(static) nowait for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // north face //--------------------------------------------------------------------- eta = 1.0; j = grid_points[1]-1; #pragma omp for schedule(static) for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // bottom face //--------------------------------------------------------------------- zeta = 0.0; k = 0; 
#pragma omp for schedule(static) nowait for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; for (i =0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // top face //--------------------------------------------------------------------- zeta = 1.0; k = grid_points[2]-1; #pragma omp for schedule(static) nowait for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; for (i =0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } } //end parallel } void lhsinit(int ni, int nj) { int j, m; //--------------------------------------------------------------------- // zap the whole left hand side for starters // set all diagonal values to 1. This is overkill, but convenient //--------------------------------------------------------------------- for (j = 1; j <= nj; j++) { for (m = 0; m < 5; m++) { lhs [j][0][m] = 0.0; lhsp[j][0][m] = 0.0; lhsm[j][0][m] = 0.0; lhs [j][ni][m] = 0.0; lhsp[j][ni][m] = 0.0; lhsm[j][ni][m] = 0.0; } lhs [j][0][2] = 1.0; lhsp[j][0][2] = 1.0; lhsm[j][0][2] = 1.0; lhs [j][ni][2] = 1.0; lhsp[j][ni][2] = 1.0; lhsm[j][ni][2] = 1.0; } } void lhsinitj(int nj, int ni) { int i, m; //--------------------------------------------------------------------- // zap the whole left hand side for starters // set all diagonal values to 1. This is overkill, but convenient //--------------------------------------------------------------------- for (i = 1; i <= ni; i++) { for (m = 0; m < 5; m++) { lhs [0][i][m] = 0.0; lhsp[0][i][m] = 0.0; lhsm[0][i][m] = 0.0; lhs [nj][i][m] = 0.0; lhsp[nj][i][m] = 0.0; lhsm[nj][i][m] = 0.0; } lhs [0][i][2] = 1.0; lhsp[0][i][2] = 1.0; lhsm[0][i][2] = 1.0; lhs [nj][i][2] = 1.0; lhsp[nj][i][2] = 1.0; lhsm[nj][i][2] = 1.0; } }
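/* Illustrative note (plain C, not part of the original file): the blend used in the interpolation loop above, u = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta, is algebraically the inclusion-exclusion form of u = 1 - (1 - Pxi)*(1 - Peta)*(1 - Pzeta). The small check below confirms the identity numerically for arbitrary inputs. */
#include <stdio.h>
#include <math.h>

int main(void)
{
  double a = 0.3, b = 0.6, c = 0.9;   /* stand-ins for Pxi, Peta, Pzeta */
  double lhs = a + b + c - a*b - a*c - b*c + a*b*c;
  double rhs = 1.0 - (1.0 - a) * (1.0 - b) * (1.0 - c);
  printf("lhs=%.15f rhs=%.15f diff=%g\n", lhs, rhs, fabs(lhs - rhs));
  return 0;
}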
relax.c
/*BHEADER********************************************************************** * Copyright (c) 2006 The Regents of the University of California. * Produced at the Lawrence Livermore National Laboratory. * Written by the HYPRE team. UCRL-CODE-222953. * All rights reserved. * * This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/). * Please see the COPYRIGHT_and_LICENSE file for the copyright notice, * disclaimer, contact information and the GNU Lesser General Public License. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License (as published by the Free Software * Foundation) version 2.1 dated February 1999. * * HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General * Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * $Revision: 2.8 $ ***********************************************************************EHEADER*/ /****************************************************************************** * * Relaxation scheme * *****************************************************************************/ #include "headers.h" #include "omp.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGSeqRelax *--------------------------------------------------------------------------*/ int hypre_BoomerAMGSeqRelax( hypre_CSRMatrix *A, hypre_Vector *f, hypre_Vector *u) { double *A_diag_data = hypre_CSRMatrixData(A); int *A_diag_i = hypre_CSRMatrixI(A); int *A_diag_j = hypre_CSRMatrixJ(A); int n = hypre_CSRMatrixNumRows(A); double *u_data = hypre_VectorData(u); double *f_data = hypre_VectorData(f); double *tmp_data; double res; int i, j; int ii, jj; int ns, ne, size, rest; int relax_error = 0; // int index, start; int num_threads; num_threads = hypre_NumThreads(); /*----------------------------------------------------------------------- * Switch statement to direct control based on relax_type: * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (forward solve) *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (1) { tmp_data = hypre_CTAlloc(double,n); #pragma omp parallel private(num_threads) { num_threads = omp_get_num_threads(); #pragma omp for private(i) for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #pragma omp for private(i,ii,j,jj,ns,ne,res,rest,size) for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != 0.0) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } hypre_TFree(tmp_data); } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != 0.0) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } return(relax_error); }
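/* Illustrative sketch (plain C, not part of the original file): the thread-block partition used by the hybrid sweep above. The n rows are split into num_threads contiguous blocks, with the first `rest` blocks getting one extra row; rows inside a thread's own [ns, ne) are relaxed Gauss-Seidel style against fresh u values, while rows owned by other threads are read from the Jacobi copy tmp_data. The demo below just prints the [ns, ne) ranges for hypothetical n and num_threads. */
#include <stdio.h>

int main(void)
{
   int n = 10, num_threads = 3;
   int size = n / num_threads;
   int rest = n - size * num_threads;
   int j;
   for (j = 0; j < num_threads; j++)
   {
      int ns, ne;
      if (j < rest) { ns = j * size + j;    ne = (j + 1) * size + j + 1; }
      else          { ns = j * size + rest; ne = (j + 1) * size + rest; }
      printf("thread %d: rows [%d, %d)\n", j, ns, ne);
   }
   return 0;   /* prints [0, 4), [4, 7), [7, 10) */
}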
benchmark_bandwidth.c
/************************************************************************************************** * * * This file is part of BLASFEO. * * * * BLASFEO -- BLAS For Embedded Optimization. * * Copyright (C) 2019 by Gianluca Frison. * * Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. * * All rights reserved. * * * * The 2-Clause BSD License * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following conditions are met: * * * * 1. Redistributions of source code must retain the above copyright notice, this * * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * * this list of conditions and the following disclaimer in the documentation * * and/or other materials provided with the distribution. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * * Author: Gianluca Frison, gianluca.frison (at) imtek.uni-freiburg.de * * * **************************************************************************************************/
#include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "../include/blasfeo.h" #include "benchmark_x_common.h"
int main() { int nn, ii, rep, nrep; blasfeo_timer timer; double tmp_time, bandwidth_set_sequential, bandwidth_copy_sequential, bandwidth_set_parallel, bandwidth_copy_parallel; int size[] = {16, 32, 48, 64, 80, 96, 112, 128, 256, 384, 512, 768, 1024, 1536, 2048, 2560, 3072, 3584, 4096, 4608, 5120, 6144, 8192, 12288, 16384, 24576, 32768, 40960, 49152, 65536, 81920, 98304, 131072, 196608, 262144, 393216}; int nnrep[] = {4000000, 4000000, 4000000, 2000000, 2000000, 1000000, 1000000, 1000000, 400000, 400000, 400000, 200000, 200000, 200000, 100000, 100000, 100000, 40000, 40000, 40000, 20000, 20000, 20000, 10000, 10000, 10000, 10000, 4000, 4000, 4000, 2000, 2000, 2000, 1000, 1000, 1000}; int n_size = 36; #ifdef _OPENMP int num_threads = 2; omp_set_num_threads(num_threads); printf("\nusing %d threads\n", num_threads); #endif printf("\nsize (KB)\tset sequential\tset parallel\tspeedup\t\tcopy sequential\tcopy parallel\tspeedup\n"); for(nn=0; nn<n_size; nn++) { nrep = nnrep[nn]; double *x = malloc(size[nn]*sizeof(double)); double *y = malloc(size[nn]*sizeof(double)); int id = -1; /* set benchmark sequential */ // set to zero for(ii=0; ii<size[nn]; ii++) { x[ii] = 0.0; } blasfeo_tic(&timer); for(rep=0; rep<nrep; rep++) { for(ii=0; ii<size[nn]; ii++) { x[ii] = 0.0; } } tmp_time = blasfeo_toc(&timer) / nrep; bandwidth_set_sequential = size[nn] * 8.0 / tmp_time; /* set benchmark parallel */ #pragma omp parallel private(id, rep, ii) { //id = omp_get_thread_num(); //printf("\nid %d\n", id); #pragma omp master { // set to zero
for(ii=0; ii<size[nn]; ii++) { x[ii] = 0.0; } blasfeo_tic(&timer); } // master #pragma omp for schedule(static) for(rep=0; rep<nrep; rep++) { for(ii=0; ii<size[nn]; ii++) { x[ii] = 0.0; } } #pragma omp master { tmp_time = blasfeo_toc(&timer) / nrep; } // master } // parallel bandwidth_set_parallel = size[nn] * 8.0 / tmp_time; /* copy benchmark sequential */ // copy for(ii=0; ii<size[nn]; ii++) { y[ii] = x[ii]; } blasfeo_tic(&timer); for(rep=0; rep<nrep; rep++) { for(ii=0; ii<size[nn]; ii++) { y[ii] = x[ii]; } } tmp_time = blasfeo_toc(&timer) / nrep; bandwidth_copy_sequential = size[nn] * 2 * 8.0 / tmp_time; /* copy benchmark parallel */ #pragma omp parallel private(id, rep, ii) { //id = omp_get_thread_num(); //printf("\nid %d\n", id); #pragma omp master { // copy for(ii=0; ii<size[nn]; ii++) { y[ii] = x[ii]; } blasfeo_tic(&timer); } // master #pragma omp for schedule(static) for(rep=0; rep<nrep; rep++) { for(ii=0; ii<size[nn]; ii++) { y[ii] = x[ii]; } } #pragma omp master { tmp_time = blasfeo_toc(&timer) / nrep; } // master } // parallel bandwidth_copy_parallel = size[nn] * 2 * 8.0 / tmp_time; // print printf("%f\t%e\t%e\t%f\t%e\t%e\t%f\n", size[nn]*8.0/1024.0, bandwidth_set_sequential, bandwidth_set_parallel, bandwidth_set_parallel/bandwidth_set_sequential, bandwidth_copy_sequential, bandwidth_copy_parallel, bandwidth_copy_parallel/bandwidth_copy_sequential); free(x); free(y); } return 0; }
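A note on the arithmetic behind the printed figures, as a standalone sketch (the helper names below are ours, not part of BLASFEO): tmp_time is seconds per repetition, a "set" writes the array once per repetition, a "copy" reads it once and writes it once, so both results come out in bytes per second.

#include <stdio.h>

/* hypothetical helpers restating the accounting used in the benchmark */
static double set_bandwidth(int size, double sec_per_rep)
	{
	return size * 8.0 / sec_per_rep; /* 8 bytes per double, written once */
	}

static double copy_bandwidth(int size, double sec_per_rep)
	{
	return size * 2 * 8.0 / sec_per_rep; /* read + write */
	}

int main()
	{
	/* example: 1024 doubles set in 1 microsecond -> 8.192e9 bytes/s */
	printf("%e\n", set_bandwidth(1024, 1e-6));
	printf("%e\n", copy_bandwidth(1024, 1e-6));
	return 0;
	}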
GB_subassign_18.c
//------------------------------------------------------------------------------ // GB_subassign_18: C(I,J)<!M,repl> = A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Method 18: C(I,J)<!M,repl> = A ; using S // M: present // Mask_comp: true // C_replace: true // accum: NULL // A: matrix // S: constructed #define GB_FREE_WORK GB_FREE_TWO_SLICE #include "GB_subassign_methods.h" GrB_Info GB_subassign_18 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const GrB_Matrix A, const GrB_Matrix S, GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_GET_C ; GB_GET_MASK ; const bool M_is_hyper = M->is_hyper ; const int64_t Mnvec = M->nvec ; GB_GET_A ; GB_GET_S ; GrB_BinaryOp accum = NULL ; //-------------------------------------------------------------------------- // Method 18: C(I,J)<!M,repl> = A ; using S //-------------------------------------------------------------------------- // Time: Optimal. O((nnz(A)+nnz(S))*log(m)), since all entries in S+A must // be traversed, and the corresponding entry in M (even if not present) // determines the action to take. log(m) is the # of entries in a column // of M. // Method 10 and 18 are very similar. //-------------------------------------------------------------------------- // Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- GB_SUBASSIGN_TWO_SLICE (A, S) ; //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (int taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE1 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get A(:,j) and S(:,j) //------------------------------------------------------------------ int64_t j = (Zh == NULL) ? 
k : Zh [k] ; GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ; GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ; //------------------------------------------------------------------ // get M(:,j) //------------------------------------------------------------------ int64_t pM_start, pM_end ; GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ; //------------------------------------------------------------------ // do a 2-way merge of S(:,j) and A(:,j) //------------------------------------------------------------------ // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = Si [pS] ; int64_t iA = Ai [pA] ; if (iS < iA) { // S (i,j) is present but A (i,j) is not // ----[C . 1] or [X . 1]----------------------------------- // [C . 1]: action: ( delete ): becomes zombie // [X . 1]: action: ( X ): still zombie // ----[C . 0] or [X . 0]----------------------------------- // [X . 0]: action: ( X ): still a zombie // [C . 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } else if (iA < iS) { // S (i,j) is not present, A (i,j) is present GB_MIJ_BINARY_SEARCH (iA) ; mij = !mij ; if (mij) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (A) ; } else { // both S (i,j) and A (i,j) present GB_MIJ_BINARY_SEARCH (iA) ; mij = !mij ; GB_C_S_LOOKUP ; if (mij) { // ----[C A 1] or [X A 1]------------------------------- // [C A 1]: action: ( =A ): A to C no accum // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_matrix ; } else { // ----[C A 0] or [X A 0]------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_DELETE_ENTRY ; } GB_NEXT (S) ; GB_NEXT (A) ; } } // while list S (:,j) has entries. List A (:,j) exhausted while (pS < pS_end) { // ----[C . 1] or [X . 1]--------------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( delete ): becomes zombie // [X . 1]: action: ( X ): still a zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } // while list A (:,j) has entries. List S (:,j) exhausted while (pA < pA_end) { // S (i,j) is not present, A (i,j) is present int64_t iA = Ai [pA] ; GB_MIJ_BINARY_SEARCH (iA) ; mij = !mij ; if (mij) { // ----[. A 1]---------------------------------------------- // [. 
A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (A) ; } } GB_PHASE1_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (int taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE2 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get A(:,j) and S(:,j) //------------------------------------------------------------------ int64_t j = (Zh == NULL) ? k : Zh [k] ; GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ; GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ; //------------------------------------------------------------------ // get M(:,j) //------------------------------------------------------------------ int64_t pM_start, pM_end ; GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ; //------------------------------------------------------------------ // do a 2-way merge of S(:,j) and A(:,j) //------------------------------------------------------------------ // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = Si [pS] ; int64_t iA = Ai [pA] ; if (iS < iA) { // S (i,j) is present but A (i,j) is not GB_NEXT (S) ; } else if (iA < iS) { // S (i,j) is not present, A (i,j) is present GB_MIJ_BINARY_SEARCH (iA) ; mij = !mij ; if (mij) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (Ax +(pA*asize)) ; } GB_NEXT (A) ; } else { // both S (i,j) and A (i,j) present GB_NEXT (S) ; GB_NEXT (A) ; } } // while list A (:,j) has entries. List S (:,j) exhausted while (pA < pA_end) { // S (i,j) is not present, A (i,j) is present int64_t iA = Ai [pA] ; GB_MIJ_BINARY_SEARCH (iA) ; mij = !mij ; if (mij) { // ----[. A 1]---------------------------------------------- // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (Ax +(pA*asize)) ; } GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
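Stripped of the GB_* macros, both phases above run the same two-way merge over the sorted row indices of S(:,j) and A(:,j). The sketch below (our own names, not GraphBLAS code) isolates that pattern and the three cases it distinguishes:

#include <stdio.h>
#include <inttypes.h>

// classify each row index as S-only, A-only, or present in both lists;
// Si and Ai stand in for the row indices of S(:,j) and A(:,j)
static void two_way_merge (const int64_t *Si, int64_t sn,
                           const int64_t *Ai, int64_t an)
{
    int64_t pS = 0, pA = 0 ;
    while (pS < sn && pA < an)
    {
        if      (Si [pS] < Ai [pA]) { printf ("S only: %" PRId64 "\n", Si [pS++]) ; }
        else if (Ai [pA] < Si [pS]) { printf ("A only: %" PRId64 "\n", Ai [pA++]) ; }
        else    { printf ("both  : %" PRId64 "\n", Si [pS++]) ; pA++ ; }
    }
    while (pS < sn) printf ("S only: %" PRId64 "\n", Si [pS++]) ;   // A exhausted
    while (pA < an) printf ("A only: %" PRId64 "\n", Ai [pA++]) ;   // S exhausted
}

int main (void)
{
    const int64_t S [3] = {0, 2, 5}, A [4] = {2, 3, 5, 7} ;
    two_way_merge (S, 3, A, 4) ;    // prints one line per distinct index
    return (0) ;
}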
GB_unop__identity_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__(none)) // op(A') function: GB (_unop_tran__identity_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info GB (_unop_apply__(none)) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
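For orientation, the generated apply kernel above boils down to the loop below once the code-generation macros are expanded. This is an illustrative sketch with our own function name, not the generated code itself, covering the full and bitmap cases:

#include <stdint.h>
#include <stddef.h>

// identity unary op on int8: the full case transforms every entry,
// the bitmap case skips entries whose bitmap bit is clear
static void unop_identity_int8 (int8_t *Cx, const int8_t *Ax,
                                const int8_t *Ab, size_t anz)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // bitmap: entry not present
        Cx [p] = Ax [p] ;                       // cij = aij
    }
}

int main (void)
{
    int8_t A [4] = {1, 2, 3, 4}, C [4] = {0} ;
    const int8_t b [4] = {1, 0, 1, 1} ;     // entry 1 absent
    unop_identity_int8 (C, A, b, 4) ;       // C becomes {1, 0, 3, 4}
    return (0) ;
}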
mxEvaluateVertAverage.c
//
//  main.c
//  matEvaluateVertAverage
//
//  Created by li12242 on 17/11/1.
//  Copyright (c) 2017 li12242. All rights reserved.
//

#include "mex.h"

#define INF 10.0e9

#ifdef _OPENMP
#include <omp.h>
#endif

#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
    /* check input & output */
    if (nrhs != 6){
        mexErrMsgIdAndTxt("Matlab:mxEvaluateVertAverage:InvalidNumberInput",
                          "6 inputs required.");
    }
    if (nlhs != 3){
        mexErrMsgIdAndTxt("Matlab:mxEvaluateVertAverage:InvalidNumberOutput",
                          "3 outputs required.");
    }

    /* get inputs */
    //double *cvar = mxGetPr(prhs[0]);
    int Nv = (int)mxGetScalar(prhs[1]);
    double *NkToV = mxGetPr(prhs[2]); // number of elements connecting to each vertex
    double *VToM = mxGetPr(prhs[3]);
    double *VToK = mxGetPr(prhs[4]);
    double *VToW = mxGetPr(prhs[5]);

    /* get dimensions */
    size_t maxNk = mxGetM(prhs[3]); // maximum number of cells connecting to one vertex
    // size_t K = mxGetN(prhs[2]); // number of elements

    /* allocate output array */
    plhs[0] = mxCreateDoubleMatrix((mwSize)Nv, (mwSize)1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix((mwSize)Nv, (mwSize)1, mxREAL);
    plhs[2] = mxCreateDoubleMatrix((mwSize)Nv, (mwSize)1, mxREAL);
    double *fvert = mxGetPr(plhs[0]);
    double *fvmin = mxGetPr(plhs[1]);
    double *fvmax = mxGetPr(plhs[2]);

#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
    for (int n=0; n<Nv; n++) {
        int Nk = (int)NkToV[n]; // number of cells connecting to vertex n
        fvmax[n] = -INF;
        fvmin[n] = INF;
        for (int k=0; k<Nk; k++) {
            int meshId = (int)VToM[n*maxNk + k] - 1;
            int cellId = (int)VToK[n*maxNk + k] - 1;
            double w = VToW[n*maxNk + k];

            mxArray *mcvar = mxGetCell(prhs[0], meshId);
            if (mcvar == NULL) {
                mexErrMsgIdAndTxt("Matlab:mxEvaluateVertAverage:AccessToMeshField",
                                  "Access to the mesh field failed.");
            }
            double *cvar = mxGetPr(mcvar);
            double temp = cvar[cellId];
            fvert[n] += w*temp;
            fvmax[n] = max(fvmax[n], temp);
            fvmin[n] = min(fvmin[n], temp);
        }
    }
    return;
}
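The body of the vertex loop above is a weighted average combined with extrema tracking over the cells incident to one vertex. A distilled, MEX-free sketch of that reduction (the helper name is hypothetical; DBL_MAX replaces the INF constant used above):

#include <stdio.h>
#include <float.h>

/* weighted average plus min/max over Nk cell values c[k] with weights w[k] */
static void vertex_average(const double *c, const double *w, int Nk,
                           double *avg, double *vmin, double *vmax){
    *avg = 0.0; *vmin = DBL_MAX; *vmax = -DBL_MAX;
    for (int k=0; k<Nk; k++) {
        *avg += w[k]*c[k];
        if (c[k] > *vmax) *vmax = c[k];
        if (c[k] < *vmin) *vmin = c[k];
    }
}

int main(void){
    const double c[] = {1.0, 3.0, 2.0};
    const double w[] = {0.5, 0.25, 0.25};
    double avg, vmin, vmax;
    vertex_average(c, w, 3, &avg, &vmin, &vmax);
    printf("avg=%g min=%g max=%g\n", avg, vmin, vmax); /* avg=1.75 min=1 max=3 */
    return 0;
}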
alignments_slow.c
/*----------------------------------------------------------------------------- alignments_slow \ author: jose lezama/rafael grompone von gioi \ version: 5.1 (2014.08.12) \ year: 2014 \ desc: MatLab interface for aligned point detector v5 (CVPR 2014) \ input: \ -req: input_points | Input points (Nx2 matrix). \ -opt: min_k | Minimum number of points in alignment. \ -opt: xsize | X axis domain size. \ -opt: ysize | Y axis domain size. \ -opt: epsilon | Detection threshold, (max. NFA). \ -opt: min_width | Minimal alignment width tested. \ -opt: locality | Size factor of locality. \ -opt: length_ratio | Min ratio b/w length and width. \ output: \ -6xNout alignments matrix with: x1, x2, y1, y2, width, nfa \ ------------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <limits.h> #include <float.h> #include <time.h> #include <omp.h> #include "lib/jirafa_lib.h" #include "lib/ntuples_aux.h" #include "lib/alignments.h" /** sqrt(2) */ #define SQRT2 1.414213562373 /* aux function */ #define dprint(expr) printf(#expr " = %g \n", (double) expr) /*----------------------------------------------------------------------------*/ /* look for alignments */ /* (core of the algorithm) */ void detect_alignments(double logNT, double log_epsilon, ntuple_list points, double min_width, double locality, double length_ratio, double min_k, int * cells, alignments_list *all_alignments, struct point_alignment *best_align){ int i,j,l,n; double x,y,xc,yc,dx,dy,theta,lateral_dist,long_dist,fwd_dist; double width; struct point_alignment align; ntuple_list ll; /* initialize best_align */ best_align->nfa = -logNT; /* -DBL_MAX; */ best_align->l = NULL; int NlocalLeft = 0; int NlocalRight = 0; int NlocalAlignment = 0; int point_in_alignment=0; /* test all possible aligned points */ time_t mytimer; mytimer = time(NULL); int total_num_tests = 0; for(i=0; i<points->size; i++) { #pragma omp parallel for ordered default(none) private( ll, align, l, n, x, y, xc, yc, dx, dy, theta, locality, lateral_dist, long_dist, fwd_dist, width, NlocalLeft, NlocalRight, NlocalAlignment, point_in_alignment, cells) shared(best_align, all_alignments, mytimer, total_num_tests, log_epsilon, logNT, min_k, min_width, length_ratio, points,i) for(j=i+1; j<points->size; j++) { align.i = i; align.j = j; align.x1 = points->values[ i*points->dim + 0 ]; align.y1 = points->values[ i*points->dim + 1 ]; align.x2 = points->values[ j*points->dim + 0 ]; align.y2 = points->values[ j*points->dim + 1 ]; align.len = dist(align.x1,align.y1,align.x2,align.y2); if (align.len < length_ratio) continue; xc = ( align.x1 + align.x2 ) / 2.0; yc = ( align.y1 + align.y2 ) / 2.0; theta = atan2( align.y2 - align.y1, align.x2 - align.x1 ); align.dx = dx = cos(theta); align.dy = dy = sin(theta); int wi; width = (align.len / length_ratio); int * cells2; cells2 = (int *) calloc( (size_t) 1000, sizeof(int) ); for(wi = 0; wi <6 ; wi++) { width/=2;/*SQRT2;*/ if(width < min_width) break; align.width = width; /* use around K cases where K is the number of points in the alignment */ /* count number of points in alignment */ double K=0; for(l=0;l<points->size;l++) if( l!=i && l!=j ) { x = points->values[ l*points->dim + 0 ]; y = points->values[ l*points->dim + 1 ]; lateral_dist = fabs( -(x-xc)*dy + (y-yc)*dx ); fwd_dist = (x-align.x1)*dx + (y-align.y1)*dy; /* the point is in the alignment? 
*/
		  if( lateral_dist < 0.5 * width &&
		      fwd_dist > 0 && fwd_dist < align.len ) K++;
		}

	  if (K < min_k) { continue; }

	  /* explore a range of number of cases around K */
	  double min_n = (int) fmax(4,K/2);
	  double max_n = (int) (1.5*K); /* note: "(int) 1.5*K" would cast 1.5 to int first and give max_n = K */
	  if( max_n > 1000 ) max_n = 1000; /* guard: cells2 was allocated with 1000 entries */
	  for(n = min_n; n<=max_n; n=(int) (n*SQRT2))
	    {
	      align.n = n;
	      align.s = align.len/(double) (n+1); /* cell size */

	      double loc_exp;
	      /* 25 sep 2013, begin with square local window */
	      locality = (align.len - width)/(2.0*width)*SQRT2;

	      /* for(loc_exp = 2; loc_exp <=32; loc_exp *= 2 ){ */
	      for(loc_exp = 0; loc_exp <6; loc_exp++){
		/* locality = pow(2,loc_exp/2.0); */
		locality/=2; /* SQRT2; */

		if (width*locality>250 || locality < 2) break;

		/* po is the probability of 1 point falling in one case */
		align.po = 1.0 / ( (double) n * (2.0*locality+1.0) );

		/* compute active cells and number of local points */
		align.k = 0;
		for(l=0;l<n;l++) cells2[l] = 0;
		align.locality = locality;
		align.Nlocal = 0;

		/* new in algorithm 3: count points left and right of the alignment */
		NlocalLeft = NlocalRight = NlocalAlignment = 0;

		/* initialize list of interior points */
		ll = new_ntuple_list(1);
		align.l = ll; /* new_ntuple_list(1); */

		for(l=0;l<points->size;l++)
		  if( l!=i && l!=j )
		    {
		      x = points->values[ l*points->dim + 0 ];
		      y = points->values[ l*points->dim + 1 ];

		      point_in_alignment = 0;

		      lateral_dist = fabs( -(x-xc)*dy + (y-yc)*dx );
		      fwd_dist = (x-align.x1)*dx + (y-align.y1)*dy;
		      long_dist = fabs( (x-xc)*dx + (y-yc)*dy );

		      /* the point is in the alignment? */
		      if( lateral_dist < 0.5 * width &&
			  fwd_dist > 0.5 * align.s &&
			  fwd_dist < ( (double) n + 0.5 ) * align.s )
			{
			  point_in_alignment=1;
			  NlocalAlignment++;
			  cells2[ (int) (fwd_dist/align.s - 0.5) ] = 1;

			  /* point is in the alignment */
			  /* append the point */
			  append_point_to_alignment(&align, (unsigned int) l);
			  /* align.k++; */
			}

		      /* the point is in the local area?
*/ if( lateral_dist < (locality+0.5)*width && long_dist < ( (double) n / 2.0 *align.s ) ) { align.Nlocal++; if(point_in_alignment <1) if (-(x-xc)*dy + (y-yc)*dx >0) NlocalLeft++; else NlocalRight++; } } /* count occupied cases */ for(l=0;l<n;l++) align.k += cells2[l]; if (align.k <=0) { free_ntuple_list(align.l); continue; } /* /\* compute event probability *\/ */ align.Nlocal = 2*fmax(NlocalLeft,NlocalRight) + NlocalAlignment; /* dprint(align.Nlocal); */ align.p = 1.0 - pow( 1.0 - align.po, (double) align.Nlocal ); if( align.p > 0.0 && align.p < 1.0 ){ align.nfa = nfa(n,align.k,align.p,logNT); /*total_num_tests++;*/ } else { align.nfa = -logNT; /* -DBL_MAX; */ /*total_num_tests++;*/ } /* #pragma omp ordered */ #pragma omp critical { /* if( align.nfa > best_align->nfa ) { */ /* if(best_align->l!=NULL) free_ntuple_list(best_align->l); */ /* copy_align(&align,best_align); */ /* } */ if( align.nfa >= log_epsilon ) /* if significant event */ /*if( 1 ) /* if significant event */ { append_alignment(all_alignments, &align); } } free_ntuple_list(align.l); } } } free(cells2); } } /* printf("total real number of tests: %d\n", total_num_tests);*/ } /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ /* Main */ /*----------------------------------------------------------------------------*/ int main(int argc, char ** argv){ return 0; } /*----------------------------------------------------------------------------*/ /* Matlab interface */ /*----------------------------------------------------------------------------*/ extern void mexFunction_alignment_slow(double * input_points, int X_in, int N_in, double ** output_points, int *X_out, int *N_out); void mexFunction_alignment_slow(double * input_points, int X_in, int N_in, double ** output_points, int *X_out, int *N_out){ /* input: list of 2D points output: 7xN matrix with info for N alignment detections */ /* input_points input points of size 2*N first N x coordinates, then N y coordinates */ double xsize; double ysize; double epsilon; double min_width; double locality; double min_k; double length_ratio; min_k = 2; xsize = 512; ysize = 512; epsilon = 10.; min_width = 1; locality = 10; length_ratio = 25; int X = X_in; /* number of columns (should be 2) */ int N = N_in; /* number of points */ // printf("entered data is\n"); // printf("cols: %d\n", X_in); // printf("rows: %d\n", N_in); // printf("entered data array is\n"); // for (int i=0; i < X * N; ++i) // printf("cols: %f\n", input_points[i]); // /* omp_set_num_threads(256); */ struct point_alignment align, best_align; /* initiate alignments list */ alignments_list *all_alignments = (alignments_list*)calloc(sizeof(alignments_list), 1); all_alignments->array = (struct point_alignment*)realloc(all_alignments->array, (all_alignments->capacity = 4) * sizeof(struct point_alignment)); ntuple_list points; double x,y,xc,yc,dx,dy,theta,lateral_dist,long_dist,fwd_dist; double width; unsigned int i,j,l,n; int num_test = 0; double logNT; int * cells; FILE * eps; time_t timer; /* read input */ points = new_ntuple_list(2); for(i=0;i<N;i++){ if( points->size >= points->max_size ) enlarge_ntuple_list(points); points->values[ points->size * points->dim + 0 ] = input_points[i]; points->values[ points->size * points->dim + 1 ] = input_points[N+i]; points->size++; } n=points->size; /* NUMBER OF TESTS */ num_test= n * (n-1)/2; // printf("points: %d\n", n); // printf("num_test: %d\n", num_test); logNT = log10( 
(double) num_test ); double log_epsilon = -log10(epsilon); /* initialize best_align */ best_align.nfa = -logNT; int nmax = dist(xsize,ysize,0.0,0.0) / min_width; cells = (int *) calloc( (size_t) nmax, sizeof(int) ); if( cells == NULL ) error("not enough memory."); /*--------------- detect alignments -------------------------------------- */ //printf("detecting alignments...\n"); timer = time(NULL); detect_alignments(logNT, log_epsilon, points, min_width, locality, length_ratio, min_k, cells, all_alignments, &best_align); ///printf("\nfinished detecting alignments...\n"); //printf("time elapsed: %d seconds\n", (int) (time(NULL)-timer)); dprint(best_align.nfa); /*--------------- end detect alignments------------------------------------ */ // if(sizeof(all_alignments->array)/sizeof(all_alignments->array[0]) == 0){ // all_alignments->pos = 0; // } // printf("before sorting all_alignments pos is %d, array size is %d\n", all_alignments->pos,sizeof(all_alignments->array)/sizeof(all_alignments->array[0])); /* sort alignements */ qsort ((*all_alignments).array, all_alignments->pos, sizeof(struct point_alignment), compare_alignments); // printf("after sorting all_alignments pos is %d, array size is %d\n", all_alignments->pos,sizeof(all_alignments->array)/sizeof(all_alignments->array[0])); /* --------------------------------------------------------------------------------- */ /* 1-vs-1 exclusion principle: start a list of alignments with the most significant one, then one by one check if they are masked */ /* IMPORTANT: all_alignments are ordered backwards */ /* create the list */ // printf("applying exclusion principle on resulting %d alingments...\n", all_alignments->pos); timer = time(NULL); alignments_list *f1v1_alignments = (alignments_list*)calloc(sizeof(alignments_list), 1); /* it is possible that there are no alignments*/ if (all_alignments->pos != 0){ /* append the first alignment to this list */ append_alignment(f1v1_alignments, &(all_alignments->array[all_alignments->pos-1])); int ismasked; signed int ai; for(ai=(*all_alignments).pos-2;ai>=1;ai--){ struct point_alignment temp_alignment_b = all_alignments->array[ai]; ismasked=0; for(j=0;j<(*f1v1_alignments).pos;j++){ struct point_alignment temp_alignment_a = f1v1_alignments->array[j]; /* check if alignments from final list mask alignment in all-alignments list */ double im=is_masked_cases(points, &temp_alignment_a, &temp_alignment_b, logNT); if (im<=log_epsilon) ismasked=1; } if (ismasked ==0){ /* not masked, add to final list */ append_alignment(f1v1_alignments, &temp_alignment_b); } } // printf( "number of final alignments after 1vs1: %d \n",(int) (*f1v1_alignments).pos); } // printf("\nfinished exclusion principle...\n"); // printf("time elapsed: %d seconds\n", (int) (time(NULL)-timer)); /* END of 1 vs 1 exclusion principle */ /* ------------------------------------------------------------------------ */ /*----------------- MATLAB: prepare output variables --------------------*/ /* output variables definitions */ int lw = 6; /* x1,y1,x2,y2,width,-log10(NFA)*/ int n_out = (int)(*f1v1_alignments).pos; // if(n_out == 0) // n_out = 1; // plhs[0] = mxCreateDoubleMatrix( lw, n_out ,mxREAL); /* Get a pointer to the data space in our newly allocated memory */ double* outArray = (double *) malloc (sizeof (double) * (lw * n_out)); // printf("%d shaped out data array is\n", lw * n_out); /* Copy matrix while multiplying each point by 2 */ int ii; for( ii=(*f1v1_alignments).pos-1;ii>=0;ii--){ /* hack to do backward loop */ // printf("%d out array 
assignment loop \n", ii); i=ii; struct point_alignment temp_alignment = f1v1_alignments->array[i]; /* struct point_alignment temp_alignment = f1v1_alignments->array[ii]; */ outArray[ii*lw+0] = temp_alignment.x1; outArray[ii*lw+1] = temp_alignment.y1; outArray[ii*lw+2] = temp_alignment.x2; outArray[ii*lw+3] = temp_alignment.y2; outArray[ii*lw+4] = temp_alignment.width; outArray[ii*lw+5] = temp_alignment.nfa; } // printf("out array assignment loop finished \n"); // printf("out array print loop %d\n", n_out); // for (int i=0; i < 6 * n_out; ++i) // printf("%f ", outArray[i]); // printf("\n"); /* free memory */ if(best_align.l != NULL) free_ntuple_list(best_align.l); // printf("freeing free_ntuple_list finished \n"); // printf("all_alignments pos is %d, array size is %d\n", all_alignments->pos,sizeof(all_alignments->array)/sizeof(all_alignments->array[0])); free_alignment_list(all_alignments); // printf("freeing all_alignments finished\n"); free_alignment_list(f1v1_alignments); // printf("freeing f1v1_alignments finished\n"); free(cells); // printf("freeing cells finished\n"); free_ntuple_list(points); // printf("freeing points finished\n"); /* free( (void*) cells ); free_ntuple_list(points); */ /* omp_set_num_threads(256); */ // return outArray; *output_points = outArray; // printf("assigning output points finished\n"); *X_out = 6; // printf("assigning xout finished\n"); *N_out = n_out; // printf("assigning nout finished\n"); } /*----------------------------------------------------------------------------*/ void free_mem(double* a) { free(a); }
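For reference, the a-contrario bookkeeping the detector relies on, in isolation: with n input points every unordered pair defines a candidate alignment, so the number of tests is n(n-1)/2 and logNT is its base-10 logarithm. A candidate is kept when its nfa value reaches -log10(epsilon); this sketch assumes, as the comparison align.nfa >= log_epsilon above suggests, that nfa() returns -log10(NFA):

#include <stdio.h>
#include <math.h>

int main(void)
{
  int    n       = 512;                          /* number of input points */
  double epsilon = 10.0;                         /* default used above */
  double num_test = (double) n * (n-1) / 2.0;    /* 130816 candidate pairs */
  double logNT = log10(num_test);                /* about 5.117 */
  double log_epsilon = -log10(epsilon);          /* -1 */
  printf("tests=%.0f logNT=%.3f threshold=%.3f\n", num_test, logNT, log_epsilon);
  return 0;
}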
FrameBuffer.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                     Copyright (C) 2018 by Ryuji Fuchikami
//                                     https://github.com/ryuz
//                                     ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------


#pragma once

#include <mutex>
#include <iostream>
#include <fstream>
#include <array>
#include <vector>
#include <memory>
#include <malloc.h>

#include "bb/DataType.h"
#include "bb/Object.h"
#include "bb/Tensor.h"


namespace bb {


// [FrameBuffer class]
//   - used to connect one Layer to the next
//   - holds the information needed to set up a Layer's parameters
//   - the actual storage is held by the Tensor class
//   - gives meaning to each dimension of the data inside the Tensor
//     (managed along the axis on which Sparse operations perform well)
//
// To the external API it looks as if one Tensor were stored per frame.
// Internally everything is merged into a single Tensor, i.e. the internal
// Tensor has one extra dimension. The memory layout is neither NCHW nor
// NHWC but deliberately CHWN, where N means frame.
//
// The terms node_size and node are also defined here; they refer to
// accessing each frame's Tensor as a flat one-dimensional array. This is
// strongly geared towards CUDA and SIMD programming, where the memory
// layout must be kept firmly in mind; shape exists to make memory access
// from higher levels more convenient.
// Note that the same node can be accessed in two ways:
//   - frame number + node number
//   - frame number + multi-dimensional indices following shape


// -------------------------------------
//  pointer classes for buffer access
// -------------------------------------

class FrameBuffer;

// for const access
template <typename Tp, class FrameBufferTp, class PtrTp>
class FrameBufferConstPtr_
{
    friend FrameBufferTp;

protected:
    FrameBufferTp*  m_buf;
    PtrTp           m_ptr;

protected:
    FrameBufferConstPtr_(FrameBufferTp* buf) { m_buf = buf; }

public:
    FrameBufferConstPtr_(FrameBufferConstPtr_ const &buf) { m_buf = buf.m_buf; m_ptr = buf.m_ptr; }

protected:
    inline void Lock(void) { m_ptr = m_buf->LockMemoryConst(); }

    inline void const *GetNodeBaseAddr(index_t node) const
    {
        auto addr = (std::uint8_t const *)m_ptr.GetAddr();
        return addr + (m_buf->m_frame_stride * node);
    }

    inline index_t GetNodeIndex(indices_t const & indices) const            { return ConvertIndicesToIndex(indices, m_buf->GetShape()); }
    inline index_t GetNodeIndex(index_t i0) const                           { return ConvertIndicesToIndex(i0, m_buf->GetShape()); }
    inline index_t GetNodeIndex(index_t i0, index_t i1) const               { return ConvertIndicesToIndex(i0, i1, m_buf->GetShape()); }
    inline index_t GetNodeIndex(index_t i0, index_t i1, index_t i2) const   { return ConvertIndicesToIndex(i0, i1, i2, m_buf->GetShape()); }

    inline Tp ReadValue(void const *base, index_t frame) const { return DataType_Read<Tp>(base, frame); }

public:
    inline FrameBuffer const &GetFrameBuffer(void) const { return *m_buf; }

    inline Tp const *GetAddr(void) const            { return (Tp *)m_ptr.GetAddr(); }
    inline Tp const *GetAddr(index_t node) const    { return (Tp *)GetNodeBaseAddr(node); }

    inline Tp Get(index_t frame, index_t node) const                        { return ReadValue(GetNodeBaseAddr(node), frame); }
    inline Tp Get(index_t frame, indices_t indices) const                   { return Get(frame, GetNodeIndex(indices)); }
    inline Tp Get(index_t frame, index_t i0, index_t i1) const              { return Get(frame, GetNodeIndex(i0, i1)); }
    inline Tp Get(index_t frame, index_t i0, index_t i1, index_t i2) const  { return Get(frame, GetNodeIndex(i0, i1, i2)); }
};


// for non-const access
template <typename Tp, class FrameBufferTp, class PtrTp>
class FrameBufferPtr_ : public FrameBufferConstPtr_<Tp, FrameBufferTp, PtrTp>
{
    friend FrameBufferTp;

    using FrameBufferConstPtr_<Tp, FrameBufferTp, PtrTp>::m_buf;
    using FrameBufferConstPtr_<Tp, FrameBufferTp, PtrTp>::m_ptr;

protected:
    FrameBufferPtr_(FrameBufferTp* buf) : FrameBufferConstPtr_<Tp, FrameBufferTp, PtrTp>(buf) { }

    inline void Lock(bool new_buf) { m_ptr = m_buf->LockMemory(new_buf); }

protected:
    inline void
*GetNodeBaseAddr(index_t node) { BB_DEBUG_ASSERT(node >= 0 && node < m_buf->GetNodeSize()); auto addr = (std::uint8_t *)m_ptr.GetAddr(); return addr + (m_buf->m_frame_stride * node); } inline index_t GetNodeIndex(indices_t const & indices) { return ConvertIndicesToIndex(indices, m_buf->m_node_shape); } inline index_t GetNodeIndex(index_t i0) { return ConvertIndicesToIndex(i0, m_buf->m_node_shape); } inline index_t GetNodeIndex(index_t i0, index_t i1) { return ConvertIndicesToIndex(i0, i1, m_buf->m_node_shape); } inline index_t GetNodeIndex(index_t i0, index_t i1, index_t i2) { return ConvertIndicesToIndex(i0, i1, i2, m_buf->m_node_shape); } inline void WriteValue(void *base, index_t frame, Tp value) { return DataType_Write<Tp>(base, frame, value); } inline void AddValue(void *base, index_t frame, Tp value) { return DataType_Add<Tp>(base, frame, value); } public: inline Tp *GetAddr(void) { return (Tp *)m_ptr.GetAddr(); } inline Tp *GetAddr(index_t node) { return (Tp *)GetNodeBaseAddr(node); } inline void Set(index_t frame, index_t node, Tp value) { return WriteValue(GetNodeBaseAddr(node), frame, value); } inline void Set(index_t frame, indices_t indices, Tp value) { return Set(frame, GetNodeIndex(indices), value); } inline void Set(index_t frame, index_t i0, index_t i1, Tp value) { return Set(frame, GetNodeIndex(i0, i1), value); } inline void Set(index_t frame, index_t i0, index_t i1, index_t i2, Tp value) { return Set(frame, GetNodeIndex(i0, i1, i2), value); } inline void Add(index_t frame, index_t node, Tp value) { return AddValue(GetNodeBaseAddr(node), frame, value); } inline void Add(index_t frame, indices_t indices, Tp value) { return Add(frame, GetNodeIndex(indices), value); } inline void Add(index_t frame, index_t i0, index_t i1, Tp value) { return Add(frame, GetNodeIndex(i0, i1), value); } inline void Add(index_t frame, index_t i0, index_t i1, index_t i2, Tp value) { return Add(frame, GetNodeIndex(i0, i1, i2), value); } }; // NeuralNet用のバッファ class FrameBuffer : public Object { using _super = Object; public: static inline std::string ObjectName(void){ return "FrameBuffer"; } std::string GetObjectName(void) const override { return ObjectName(); } protected: Tensor m_tensor; int m_data_type = 0; index_t m_frame_size = 0; index_t m_frame_stride = 0; index_t m_node_size = 0; indices_t m_node_shape; public: /** * @brief デフォルトコンストラクタ * @detail デフォルトコンストラクタ */ // explicit FrameBuffer(bool host_only=false) : m_tensor(host_only) {} /** * @brief コンストラクタ * @detail コンストラクタ * tensor は node_size サイズの1次元で初期化 * @param frame_size フレーム数 * @param node_size 1フレームのノード数 * @param data_type 1ノードのデータ型 */ // explicit FrameBuffer(index_t frame_size, index_t node_size, int data_type, bool host_only=false) : m_tensor(host_only) // { // Resize(frame_size, indices_t({node_size}), data_type); // } /** * @brief コンストラクタ * @detail コンストラクタ * @param frame_size フレーム数 * @param shape 1フレームのノードを構成するshape * @param data_type 1ノードのデータ型 */ explicit FrameBuffer(index_t frame_size=0, indices_t shape=indices_t(), int data_type=BB_TYPE_FP32, bool host_only=false) : m_tensor(host_only) { if ( frame_size > 0 ) { Resize(frame_size, shape, data_type); } } /** * @brief コピーコンストラクタ * @detail コピーコンストラクタ */ FrameBuffer(FrameBuffer const &buf) { *this = buf; } /** * @brief 代入演算子 * @detail 代入演算子 * 代入演算子でのコピーは、メモリは同じ箇所を指す */ FrameBuffer& operator=(FrameBuffer const &buf) { m_tensor = buf.m_tensor; m_data_type = buf.m_data_type; m_frame_size = buf.m_frame_size; m_frame_stride = buf.m_frame_stride; m_node_size = buf.m_node_size; m_node_shape = 
buf.m_node_shape;
        return *this;
    }

#ifdef BB_PYBIND11
    template<typename Tp>
    static FrameBuffer FromNumpy(pybind11::array_t<Tp> ndarray, int data_type, index_t frame_size, index_t frame_stride, indices_t node_shape, bool host_only=false)
    {
        FrameBuffer buf;
        buf.m_tensor       = Tensor_<Tp>::FromNumpy(ndarray, host_only);
        buf.m_data_type    = data_type;
        buf.m_frame_size   = frame_size;
        buf.m_frame_stride = frame_stride;
        buf.m_node_shape   = node_shape;
        buf.m_node_size    = CalcShapeSize(node_shape);
        return buf;
    }

    template<typename Tp>
    pybind11::array_t<Tp> Numpy(void)
    {
        return m_tensor.Numpy<Tp>();
    }
#endif

    /**
     * @brief  clone
     * @return a clone whose memory contents are copied
     */
    FrameBuffer Clone(void) const
    {
        FrameBuffer clone_buf;
        clone_buf.m_tensor       = m_tensor.Clone();
        clone_buf.m_data_type    = m_data_type;
        clone_buf.m_frame_size   = m_frame_size;
        clone_buf.m_frame_stride = m_frame_stride;
        clone_buf.m_node_size    = m_node_size;
        clone_buf.m_node_shape   = m_node_shape;
        return clone_buf;
    }

    bool IsHostOnly(void) const { return m_tensor.IsHostOnly(); }

    bool Empty(void) const { return (m_frame_size == 0); }

    /**
     * @brief  query whether a device is available
     * @return true if a device is available
     */
    bool IsDeviceAvailable(void) const { return m_tensor.IsDeviceAvailable(); }

    /**
     * @brief  compute the frame stride
     * @param  data_type   data type of one node
     * @param  frame_size  number of frames
     */
    static index_t CalcFrameStride(int data_type, index_t frame_size)
    {
        return ((frame_size * DataType_GetBitSize(data_type) + 255) / 256) * (256 / 8);   // align the frame axis to a 256-bit boundary (for SIMD instructions)
    }

    /**
     * @brief  set the size
     * @param  frame_size  number of frames
     * @param  shape       shape of the nodes in one frame
     * @param  data_type   data type of one node
     */
    void Resize(index_t frame_size, indices_t shape, int data_type)
    {
        m_data_type    = data_type;
        m_frame_size   = frame_size;
        m_frame_stride = CalcFrameStride(data_type, frame_size);    // align the frame axis to a 256-bit boundary (for SIMD instructions)
        m_node_shape   = shape;

        // the Bit type is handled internally as UINT8
        int tensor_type = data_type;
        if ( data_type == BB_TYPE_BIT || data_type == BB_TYPE_BINARY ) {
            tensor_type = BB_TYPE_UINT8;
        }

        // compute sizes
        m_node_size = 1;
        std::vector<index_t> tensor_shape;
        for ( auto size : shape ) {
            tensor_shape.push_back(size);
            m_node_size *= size;
        }
        tensor_shape.push_back(m_frame_stride / DataType_GetByteSize(tensor_type));

        // allocate memory
        m_tensor.Resize(tensor_shape, tensor_type);
    }

    /**
     * @brief  make this buffer the same type and size as another
     * @param  buf  the buffer whose type and size are matched
     */
    void ResizeLike(FrameBuffer const &buf)
    {
        Resize(buf.GetFrameSize(), buf.GetShape(), buf.GetType());
    }

    template <typename T>
    void CopyTo_(FrameBuffer& dst, index_t frame_size = 0, index_t src_frame_offset = 0, index_t dst_frame_offset = 0,
                 index_t node_size = 0, index_t src_node_offset = 0, index_t dst_node_offset = 0) const
    {
        auto dst_ptr = dst.Lock<T>();
        auto src_ptr = LockConst<T>();
        for ( index_t node = 0; node < node_size; ++node) {
            for ( index_t frame = 0; frame < frame_size; ++frame) {
                dst_ptr.Set(frame + dst_frame_offset, node + dst_node_offset, src_ptr.Get(frame + src_frame_offset, node + src_node_offset));
            }
        }
    }

    /**
     * @brief  copy
     * @detail performs a copy
     */
    void CopyTo(FrameBuffer& dst, index_t frame_size = 0, index_t src_frame_offset=0, index_t dst_frame_offset=0,
                index_t node_size = 0, index_t src_node_offset=0, index_t dst_node_offset=0) const
    {
        BB_ASSERT(dst.GetType() == GetType());
        if ( node_size <= 0) {
            // default: copy as many nodes as both buffers can hold
            // (was "GetFrameSize() - src_node_offset", mixing the frame count into a node count)
            node_size = std::min(dst.GetNodeSize() - dst_node_offset, GetNodeSize() - src_node_offset);
        }
        if ( frame_size <= 0) {
            // default: copy as many frames as both buffers can hold
            // (was "GetFrameSize() - dst_frame_offset" on the source side)
            frame_size = std::min(dst.GetFrameSize() - dst_frame_offset, GetFrameSize() - src_frame_offset);
        }
BB_ASSERT(frame_size + src_frame_offset <= GetFrameSize()); BB_ASSERT(frame_size + dst_frame_offset <= dst.GetFrameSize()); BB_ASSERT(node_size + src_node_offset <= GetNodeSize()); BB_ASSERT(node_size + dst_node_offset <= dst.GetNodeSize()); #ifdef BB_WITH_CUDA if (dst.IsDeviceAvailable() && IsDeviceAvailable()) { if ( DataType_GetBitSize(GetType()) == 32 ) { auto dst_ptr = dst.LockDeviceMemory(); auto src_ptr = LockDeviceMemoryConst(); bbcu_int32_FrameBufferCopy ( (int *)dst_ptr.GetAddr(), (int const *)src_ptr.GetAddr(), (int )node_size, (int )dst_node_offset, (int )src_node_offset, (int )frame_size, (int )dst_frame_offset, (int )src_frame_offset, (int )(dst.GetFrameStride() / 4), (int )(GetFrameStride() / 4) ); return; } if ( DataType_GetBitSize(GetType()) == 1 ) { BB_ASSERT(dst_frame_offset % 32 == 0); BB_ASSERT(src_frame_offset % 32 == 0); BB_ASSERT(frame_size % 32 == 0); auto dst_ptr = dst.LockDeviceMemory(); auto src_ptr = LockDeviceMemoryConst(); bbcu_int32_FrameBufferCopy ( (int *)dst_ptr.GetAddr(), (int const *)src_ptr.GetAddr(), (int )node_size, (int )dst_node_offset, (int )src_node_offset, (int )(frame_size / 32), (int )(dst_frame_offset / 32), (int )(src_frame_offset / 32), (int )(dst.GetFrameStride() / 4), (int )(GetFrameStride() / 4) ); return; } } #endif switch ( GetType() ) { case BB_TYPE_BIT: CopyTo_<Bit >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_FP32: CopyTo_<float >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_FP64: CopyTo_<double >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_INT8: CopyTo_<int8_t >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_INT16: CopyTo_<int16_t >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_INT32: CopyTo_<int32_t >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_INT64: CopyTo_<int64_t >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_UINT8: CopyTo_<uint8_t >(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_UINT16: CopyTo_<uint16_t>(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_UINT32: CopyTo_<uint32_t>(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; case BB_TYPE_UINT64: CopyTo_<uint64_t>(dst, frame_size, src_frame_offset, dst_frame_offset, node_size, src_node_offset, dst_node_offset); return; } // 他は必要なときに足す BB_ASSERT(0); } // ------------------------------------- // シリアライズ // ------------------------------------- protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_data_type); bb::SaveValue(os, m_frame_size); bb::SaveValue(os, m_frame_stride); bb::SaveValue(os, m_node_shape); m_tensor.DumpObject(os); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_data_type); 
bb::LoadValue(is, m_frame_size); bb::LoadValue(is, m_frame_stride); bb::LoadValue(is, m_node_shape); m_tensor.LoadObject(is); // 再構築 m_node_size = CalcShapeSize(m_node_shape); } // ------------------------------------- // Serialize(旧) // ------------------------------------- public: void Save(std::ostream &os) const { os.write((char const *)&m_data_type, sizeof(m_data_type)); SaveIndex(os, m_frame_size); SaveIndex(os, m_frame_stride); SaveIndex(os, m_node_size); SaveIndices(os, m_node_shape); m_tensor.Save(os); } void Load(std::istream &is) { is.read((char *)&m_data_type, sizeof(m_data_type)); m_frame_size = LoadIndex(is); m_frame_stride = LoadIndex(is); m_node_size = LoadIndex(is); m_node_shape = LoadIndices(is); m_tensor.Load(is); } void Save(std::string filename) const { std::ofstream ofs(filename, std::ios::binary); Save(ofs); } void Load(std::string filename) { std::ifstream ifs(filename, std::ios::binary); Load(ifs); } #ifdef BB_WITH_CEREAL template <class Archive> void serialize(Archive& archive, std::uint32_t const version) { archive(cereal::make_nvp("data_type", m_data_type)); archive(cereal::make_nvp("frame_size", m_frame_size)); archive(cereal::make_nvp("frame_stride", m_frame_stride)); archive(cereal::make_nvp("node_size", m_node_size)); archive(cereal::make_nvp("node_shape", m_node_shape)); archive(cereal::make_nvp("tensor", m_tensor)); } #endif // ------------------------------------- // Resize // ------------------------------------- /* void Resize(index_t frame_size, index_t i0, int type) { Resize(frame_size, indices_t({i0}), type); } void Resize(index_t frame_size, index_t i0, index_t i1, int type) { Resize(frame_size, indices_t({i0, i1}), type); } void Resize(index_t frame_size, index_t i0, index_t i1, index_t i2, int type) { Resize(frame_size, indices_t({i0, i1, i2}), type); } void Resize(index_t frame_size, index_t i0, index_t i1, index_t i2, index_t i3, int type) { Resize(frame_size, indices_t({i0, i1, i2, i3}), type); } */ void Reshape(std::vector<index_t> shape) { index_t auto_index = -1; index_t total = 1; for (std::intptr_t i = (std::intptr_t)shape.size()-1; i >= 0;--i) { if (shape[i] < 0) { auto_index = i; } else { total *= shape[i]; } } if (auto_index >= 0) { shape[(std::size_t)auto_index] = m_node_size / total; } // 再計算 total = 1; for (auto size : shape) { total *= size; } BB_ASSERT(total == m_node_size); m_node_shape = shape; std::vector<index_t> tensor_shape; for ( auto size : shape ) { tensor_shape.push_back(size); } tensor_shape.push_back(-1); m_tensor.Reshape(tensor_shape); } std::vector<index_t> GetShape(void) const { return m_node_shape; } /** * @brief 内容のゼロ埋め * @detail 内容のゼロ埋め */ void FillZero(void) { m_tensor.FillZero(); } void Fill(double value) { m_tensor = value; } int GetType(void) const { return m_data_type; } index_t GetFrameSize(void) const { return m_frame_size; } index_t GetNodeSize(void) const { return m_node_size; } // debug inline bool IsValidValue(void) const { return m_tensor.IsValidValue(); } // debug template <typename Tp> inline bool IsZero(void) const { for (index_t frame = 0; frame < GetFrameSize(); ++frame ) { for (index_t node = 0; node < GetNodeSize(); ++node ) { if ( GetValue<Tp>(frame, node) != 0 ) { return false; } } } return true; } // --------------------------------- // ダイレクトアクセス用 // --------------------------------- index_t GetFrameStride(void) const { return m_frame_stride; } Memory::Ptr LockMemory(bool new_buf=false) const { return m_tensor.LockMemory(new_buf); } Memory::ConstPtr LockMemoryConst(void) const { return 
m_tensor.LockMemoryConst(); } Memory::DevPtr LockDeviceMemory(bool new_buf=false) const { return m_tensor.LockDeviceMemory(new_buf); } Memory::DevConstPtr LockDeviceMemoryConst(void) const { return m_tensor.LockDeviceMemoryConst(); } // 型指定アクセス template <typename MemTp, typename ValueTp> inline ValueTp Get(void const *addr, index_t frame, index_t node) const { BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type); auto ptr = LockMemoryConst(); return static_cast<ValueTp>(DataType_Read<MemTp>(GetNodeBaseAddr(addr, node), frame)); } template <typename MemTp, typename ValueTp> inline ValueTp Get(void const *addr, index_t frame, std::vector<index_t> const & indices) { return Get<MemTp, ValueTp>(addr, frame, GetNodeIndex(indices)); } template <typename MemTp, typename ValueTp> inline void Set(void *addr, index_t frame, index_t node, ValueTp value) { BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type); auto ptr = LockMemory(); DataType_Write<MemTp>(GetNodeBaseAddr(addr, node), frame, static_cast<MemTp>(value)); } template <typename MemTp, typename ValueTp> inline void Set(void *addr, index_t frame, std::vector<index_t> const & indices, ValueTp value) { Set<MemTp, ValueTp>(addr, frame, GetNodeIndex(indices), value); } template <typename MemTp, typename ValueTp> inline void Add(void *addr, index_t frame, index_t node, ValueTp value) { BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type); auto ptr = LockMemory(); DataType_Add<MemTp>(GetNodeBaseAddr(addr, node), frame, static_cast<MemTp>(value)); } template <typename MemTp, typename ValueTp> inline void Add(void *addr, index_t frame, std::vector<index_t> const & indices, ValueTp value) { Add<MemTp, ValueTp>(addr, frame, GetNodeIndex(indices), value); } // 汎用アクセス template <typename Tp> inline Tp GetValue(void const *addr, index_t frame, index_t node) { return ReadValue<Tp>(GetNodeBaseAddr(addr, node), frame); } template <typename Tp> inline Tp GetValue(void const *addr, index_t frame, std::vector<index_t> const & indices) { return GetValue<Tp>(addr, frame, GetNodeIndex(indices)); } template <typename Tp> inline void SetValue(void *addr, index_t frame, index_t node, Tp value) { WriteValue<Tp>(GetNodeBaseAddr(addr, node), frame, value); } template <typename Tp> inline void SetValue(void *addr, index_t frame, std::vector<index_t> const & indices, Tp value) { SetValue<Tp>(addr, frame, GetNodeIndex(indices), value); } template <typename Tp> inline void AddValue(void *addr, index_t frame, index_t node, Tp value) { AddValue<Tp>(GetNodeBaseAddr(addr, node), frame, value); } template <typename Tp> inline void AddValue(void *addr, index_t frame, std::vector<index_t> const & indices, Tp value) { AddValue<Tp>(addr, frame, GetNodeIndex(indices), value); } protected: template <typename DT, typename ST> FrameBuffer ConvertTo__(void) { BB_ASSERT(GetType() == DataType<ST>::type); FrameBuffer dst_buf(GetFrameSize(), GetShape(), DataType<DT>::type); #ifdef BB_WITH_CUDA if ( DataType<ST>::type == BB_TYPE_BIT && DataType<DT>::type == BB_TYPE_FP32 && this->IsDeviceAvailable() && dst_buf.IsDeviceAvailable() ) { auto src_ptr = this->LockDeviceMemoryConst(); auto dst_ptr = dst_buf.LockDeviceMemory(true); bbcu_ConvBitToReal<float>( (int const *)src_ptr.GetAddr(), (float *)dst_ptr.GetAddr(), -1.0f, +1.0f, (int)this->GetNodeSize(), (int)this->GetFrameSize(), (int)(this->GetFrameStride() / sizeof(int)), (int)(dst_buf.GetFrameStride() / sizeof(float)) ); return dst_buf; } #endif auto src_ptr = this->LockConst<ST>(); auto dst_ptr = dst_buf.Lock<DT>(); #pragma omp parallel for 
for (index_t node = 0; node < m_node_size; ++node) {
            for (index_t frame = 0; frame < m_frame_size; ++frame) {
                dst_ptr.Set(frame, node, (DT)src_ptr.Get(frame, node));
            }
        }
        return dst_buf;
    }

    template <typename ST>
    FrameBuffer ConvertTo_(int type)
    {
        switch (type) {
        case BB_TYPE_BIT:    return ConvertTo__<Bit,           ST>();
        case BB_TYPE_FP32:   return ConvertTo__<float,         ST>();
        case BB_TYPE_FP64:   return ConvertTo__<double,        ST>();
        case BB_TYPE_INT8:   return ConvertTo__<std::int8_t,   ST>();
        case BB_TYPE_INT16:  return ConvertTo__<std::int16_t,  ST>();
        case BB_TYPE_INT32:  return ConvertTo__<std::int32_t,  ST>();
        case BB_TYPE_INT64:  return ConvertTo__<std::int64_t,  ST>();
        // the unsigned cases previously dispatched to the signed types
        case BB_TYPE_UINT8:  return ConvertTo__<std::uint8_t,  ST>();
        case BB_TYPE_UINT16: return ConvertTo__<std::uint16_t, ST>();
        case BB_TYPE_UINT32: return ConvertTo__<std::uint32_t, ST>();
        case BB_TYPE_UINT64: return ConvertTo__<std::uint64_t, ST>();
        default: BB_ASSERT(0);
        }
        return FrameBuffer();
    }

public:
    FrameBuffer ConvertTo(int type)
    {
        switch (m_data_type) {
        case BB_TYPE_BIT:    return ConvertTo_<Bit>(type);
        case BB_TYPE_FP32:   return ConvertTo_<float>(type);
        case BB_TYPE_FP64:   return ConvertTo_<double>(type);
        case BB_TYPE_INT8:   return ConvertTo_<std::int8_t >(type);
        case BB_TYPE_INT16:  return ConvertTo_<std::int16_t>(type);
        case BB_TYPE_INT32:  return ConvertTo_<std::int32_t>(type);
        case BB_TYPE_INT64:  return ConvertTo_<std::int64_t>(type);
        case BB_TYPE_UINT8:  return ConvertTo_<std::uint8_t >(type);
        case BB_TYPE_UINT16: return ConvertTo_<std::uint16_t>(type);
        case BB_TYPE_UINT32: return ConvertTo_<std::uint32_t>(type);
        case BB_TYPE_UINT64: return ConvertTo_<std::uint64_t>(type);
        default: BB_ASSERT(0);
        }
        return FrameBuffer();
    }

protected:
    // ---------------------------------
    //  accessors
    // ---------------------------------

    void *GetNodeBaseAddr(void* base_addr, index_t node) const
    {
        BB_DEBUG_ASSERT(node < m_node_size);
        auto addr = (std::uint8_t*)base_addr;
        return addr + (m_frame_stride * node);
    }

    void const *GetNodeBaseAddr(const void* base_addr, index_t node) const
    {
        BB_DEBUG_ASSERT(node < m_node_size);
        auto addr = (std::uint8_t const *)base_addr;
        return addr + (m_frame_stride * node);
    }

    inline index_t GetNodeIndex(std::vector<index_t> const & indices) const
    {
        BB_DEBUG_ASSERT(indices.size() == m_node_shape.size());
        index_t stride = 1;
        index_t index  = 0;
        for ( std::intptr_t i = (std::intptr_t)m_node_shape.size() - 1; i >= 0; --i ) {
            BB_DEBUG_ASSERT(indices[i] >= 0 && indices[i] < m_node_shape[i]);
            index  += stride * indices[i];
            stride *= m_node_shape[i];
        }
        return index;
    }

    template<typename Tp>
    Tp ReadValue(void *base, index_t frame) const
    {
        switch (m_data_type) {
        case BB_TYPE_BIT:    return static_cast<Tp>(DataType_Read<Bit>          (base, frame));
        case BB_TYPE_FP32:   return static_cast<Tp>(DataType_Read<float>        (base, frame));
        case BB_TYPE_FP64:   return static_cast<Tp>(DataType_Read<double>       (base, frame));
        case BB_TYPE_INT8:   return static_cast<Tp>(DataType_Read<std::int8_t> (base, frame));
        case BB_TYPE_INT16:  return static_cast<Tp>(DataType_Read<std::int16_t>(base, frame));
        case BB_TYPE_INT32:  return static_cast<Tp>(DataType_Read<std::int32_t>(base, frame));
        case BB_TYPE_INT64:  return static_cast<Tp>(DataType_Read<std::int64_t>(base, frame));
        // read unsigned cells through the unsigned types; reading them as the
        // signed types would sign-extend values above the signed midpoint
        case BB_TYPE_UINT8:  return static_cast<Tp>(DataType_Read<std::uint8_t> (base, frame));
        case BB_TYPE_UINT16: return static_cast<Tp>(DataType_Read<std::uint16_t>(base, frame));
        case BB_TYPE_UINT32: return static_cast<Tp>(DataType_Read<std::uint32_t>(base, frame));
        case BB_TYPE_UINT64: return static_cast<Tp>(DataType_Read<std::uint64_t>(base, frame));
        default: BB_ASSERT(0);
        }
        return 0;
    }

    template<typename Tp>
    void WriteValue(void *base, index_t frame, Tp value) const
    {
        switch (m_data_type) {
        case BB_TYPE_BIT:    DataType_Write<Bit>          (base, frame, static_cast<Bit>          (value)); break;
        case BB_TYPE_FP32:   DataType_Write<float>        (base, frame, static_cast<float>        (value)); break;
        case BB_TYPE_FP64:   DataType_Write<double>       (base, frame, static_cast<double>       (value)); break;
        case BB_TYPE_INT8:   DataType_Write<std::int8_t> (base, frame, static_cast<std::int8_t> (value)); break;
        case BB_TYPE_INT16:  DataType_Write<std::int16_t>(base, frame, static_cast<std::int16_t>(value)); break;
        case BB_TYPE_INT32:  DataType_Write<std::int32_t>(base, frame, static_cast<std::int32_t>(value)); break;
        case BB_TYPE_INT64:  DataType_Write<std::int64_t>(base, frame, static_cast<std::int64_t>(value)); break;
        case BB_TYPE_UINT8:  DataType_Write<std::uint8_t> (base, frame, static_cast<std::uint8_t> (value)); break;
        case BB_TYPE_UINT16: DataType_Write<std::uint16_t>(base, frame, static_cast<std::uint16_t>(value)); break;
        case BB_TYPE_UINT32: DataType_Write<std::uint32_t>(base, frame, static_cast<std::uint32_t>(value)); break;
        case BB_TYPE_UINT64: DataType_Write<std::uint64_t>(base, frame, static_cast<std::uint64_t>(value)); break;
        default: BB_ASSERT(0);
        }
    }

    template<typename Tp>
    void AddValue(void *base, index_t frame, Tp value) const
    {
        switch (m_data_type) {
        case BB_TYPE_BIT:    DataType_Add<Bit>          (base, frame, static_cast<Bit>          (value)); break;
        case BB_TYPE_FP32:   DataType_Add<float>        (base, frame, static_cast<float>        (value)); break;
        case BB_TYPE_FP64:   DataType_Add<double>       (base, frame, static_cast<double>       (value)); break;
        case BB_TYPE_INT8:   DataType_Add<std::int8_t> (base, frame, static_cast<std::int8_t> (value)); break;
        case BB_TYPE_INT16:  DataType_Add<std::int16_t>(base, frame, static_cast<std::int16_t>(value)); break;
        case BB_TYPE_INT32:  DataType_Add<std::int32_t>(base, frame, static_cast<std::int32_t>(value)); break;
        case BB_TYPE_INT64:  DataType_Add<std::int64_t>(base, frame, static_cast<std::int64_t>(value)); break;
        case BB_TYPE_UINT8:  DataType_Add<std::uint8_t> (base, frame, static_cast<std::uint8_t> (value)); break;
        case BB_TYPE_UINT16: DataType_Add<std::uint16_t>(base, frame, static_cast<std::uint16_t>(value)); break;
        case BB_TYPE_UINT32: DataType_Add<std::uint32_t>(base, frame, static_cast<std::uint32_t>(value)); break;
        case BB_TYPE_UINT64: DataType_Add<std::uint64_t>(base, frame, static_cast<std::uint64_t>(value)); break;
        default: BB_ASSERT(0);
        }
    }

public:
    friend FrameBufferConstPtr_<Bit const,      FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<float const,    FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<double const,   FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<int8_t const,   FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<int16_t const,  FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<int32_t const,  FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<int64_t const,  FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<uint8_t const,  FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<uint16_t const, FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<uint32_t const, FrameBuffer const, Memory::ConstPtr>;
    friend FrameBufferConstPtr_<uint64_t const, FrameBuffer const, Memory::ConstPtr>;

    friend FrameBufferConstPtr_<Bit ,     FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<float ,   FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<double ,  FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<int8_t ,  FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<int16_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<int32_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<int64_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<uint8_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<uint16_t, FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<uint32_t, FrameBuffer, Memory::Ptr>;
    friend FrameBufferConstPtr_<uint64_t, FrameBuffer, Memory::Ptr>;

    friend FrameBufferPtr_<Bit ,     FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<float ,   FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<double ,  FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<int8_t ,  FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<int16_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<int32_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<int64_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<uint8_t , FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<uint16_t, FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<uint32_t, FrameBuffer, Memory::Ptr>;
    friend FrameBufferPtr_<uint64_t, FrameBuffer, Memory::Ptr>;

    template <typename Tp>
    auto LockConst(void) const
    {
        FrameBufferConstPtr_<Tp const, FrameBuffer const, Memory::ConstPtr> ptr(this);
        ptr.Lock();
        return ptr;
    }

    template <typename Tp>
    auto Lock(bool new_buf=false)
    {
        FrameBufferPtr_<Tp, FrameBuffer, Memory::Ptr> ptr(this);
        ptr.Lock(new_buf);
        return ptr;
    }

public:
    // type-specified access (the in-memory type is given explicitly)
    template <typename MemTp, typename ValueTp>
    inline ValueTp Get(index_t frame, index_t node) const
    {
        BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type);
        auto ptr = LockMemoryConst();
        return static_cast<ValueTp>(DataType_Read<MemTp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame));
    }

    template <typename MemTp, typename ValueTp>
    inline ValueTp Get(index_t frame, std::vector<index_t> const & indices)
    {
        return Get<MemTp, ValueTp>(frame, GetNodeIndex(indices));
    }

    template <typename MemTp, typename ValueTp>
    inline void Set(index_t frame, index_t node, ValueTp value)
    {
        BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type);
        auto ptr = LockMemory();
        DataType_Write<MemTp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame, static_cast<MemTp>(value));
    }

    template <typename MemTp, typename ValueTp>
    inline void Set(index_t frame, std::vector<index_t> const & indices, ValueTp value)
    {
        Set<MemTp, ValueTp>(frame, GetNodeIndex(indices), value);
    }

    template <typename MemTp, typename ValueTp>
    inline void Add(index_t frame, index_t node, ValueTp value)
    {
        BB_DEBUG_ASSERT(m_data_type == DataType<MemTp>::type);
        auto ptr = LockMemory();
        DataType_Add<MemTp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame, static_cast<MemTp>(value));
    }

    template <typename MemTp, typename ValueTp>
    inline void Add(index_t frame, std::vector<index_t> const & indices, ValueTp value)
    {
        Add<MemTp, ValueTp>(frame, GetNodeIndex(indices), value);
    }

    // generic access (dispatches on the runtime data type)
    template <typename Tp>
    inline Tp GetValue(index_t frame, index_t node) const
    {
        auto ptr = LockMemory();
        return ReadValue<Tp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame);
    }

    template <typename Tp>
    inline Tp GetValue(index_t frame, std::vector<index_t> const & indices) const
    {
        return GetValue<Tp>(frame, GetNodeIndex(indices));
    }

    template <typename Tp>
    inline void SetValue(index_t frame, index_t node, Tp value)
    {
        auto ptr = LockMemory();
        WriteValue<Tp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame, value);
    }

    template <typename Tp>
    inline void SetValue(index_t frame, std::vector<index_t>
const & indices, Tp value) { SetValue<Tp>(frame, GetNodeIndex(indices), value); } template <typename Tp> inline void AddValue(index_t frame, index_t node, Tp value) { auto ptr = LockMemory(); AddValue<Tp>(GetNodeBaseAddr(ptr.GetAddr(), node), frame, value); } template <typename Tp> inline void AddValue(index_t frame, std::vector<index_t> const & indices, Tp value) { AddValue<Tp>(frame, GetNodeIndex(indices), value); } inline void SetBit (index_t frame, index_t node, Bit value) { SetValue<Bit >(frame, node, value); } inline void SetFP32 (index_t frame, index_t node, float value) { SetValue<float >(frame, node, value); } inline void SetFP64 (index_t frame, index_t node, double value) { SetValue<double >(frame, node, value); } inline void SetINT8 (index_t frame, index_t node, std::int8_t value) { SetValue<std::int8_t >(frame, node, value); } inline void SetINT16 (index_t frame, index_t node, std::int16_t value) { SetValue<std::int16_t >(frame, node, value); } inline void SetINT32 (index_t frame, index_t node, std::int32_t value) { SetValue<std::int32_t >(frame, node, value); } inline void SetINT64 (index_t frame, index_t node, std::int64_t value) { SetValue<std::int64_t >(frame, node, value); } inline void SetUINT8 (index_t frame, index_t node, std::uint8_t value) { SetValue<std::uint8_t >(frame, node, value); } inline void SetUINT16(index_t frame, index_t node, std::uint16_t value) { SetValue<std::uint16_t>(frame, node, value); } inline void SetUINT32(index_t frame, index_t node, std::uint32_t value) { SetValue<std::uint32_t>(frame, node, value); } inline void SetUINT64(index_t frame, index_t node, std::uint64_t value) { SetValue<std::uint64_t>(frame, node, value); } inline void SetBit (index_t frame, std::vector<index_t> const & indices, Bit value) { SetValue<Bit >(frame, indices, value); } inline void SetFP32 (index_t frame, std::vector<index_t> const & indices, float value) { SetValue<float >(frame, indices, value); } inline void SetFP64 (index_t frame, std::vector<index_t> const & indices, double value) { SetValue<double >(frame, indices, value); } inline void SetINT8 (index_t frame, std::vector<index_t> const & indices, std::int8_t value) { SetValue<std::int8_t >(frame, indices, value); } inline void SetINT16 (index_t frame, std::vector<index_t> const & indices, std::int16_t value) { SetValue<std::int16_t >(frame, indices, value); } inline void SetINT32 (index_t frame, std::vector<index_t> const & indices, std::int32_t value) { SetValue<std::int32_t >(frame, indices, value); } inline void SetINT64 (index_t frame, std::vector<index_t> const & indices, std::int64_t value) { SetValue<std::int64_t >(frame, indices, value); } inline void SetUINT8 (index_t frame, std::vector<index_t> const & indices, std::uint8_t value) { SetValue<std::uint8_t >(frame, indices, value); } inline void SetUINT16(index_t frame, std::vector<index_t> const & indices, std::uint16_t value) { SetValue<std::uint16_t>(frame, indices, value); } inline void SetUINT32(index_t frame, std::vector<index_t> const & indices, std::uint32_t value) { SetValue<std::uint32_t>(frame, indices, value); } inline void SetUINT64(index_t frame, std::vector<index_t> const & indices, std::uint64_t value) { SetValue<std::uint64_t>(frame, indices, value); } inline Bit GetBit (index_t frame, index_t node) const { return GetValue<Bit >(frame, node); } inline float GetFP32 (index_t frame, index_t node) const { return GetValue<float >(frame, node); } inline double GetFP64 (index_t frame, index_t node) const { return GetValue<double >(frame, node); 
} inline std::int8_t GetINT8 (index_t frame, index_t node) const { return GetValue<std::int8_t >(frame, node); } inline std::int16_t GetINT16 (index_t frame, index_t node) const { return GetValue<std::int16_t >(frame, node); } inline std::int32_t GetINT32 (index_t frame, index_t node) const { return GetValue<std::int32_t >(frame, node); } inline std::int64_t GetINT64 (index_t frame, index_t node) const { return GetValue<std::int64_t >(frame, node); } inline std::uint8_t GetUINT8 (index_t frame, index_t node) const { return GetValue<std::uint8_t >(frame, node); } inline std::uint16_t GetUINT16(index_t frame, index_t node) const { return GetValue<std::uint16_t>(frame, node); } inline std::uint32_t GetUINT32(index_t frame, index_t node) const { return GetValue<std::uint32_t>(frame, node); } inline std::uint64_t GetUINT64(index_t frame, index_t node) const { return GetValue<std::uint64_t>(frame, node); } inline Bit GetBit (index_t frame, std::vector<index_t> const & indices) { return GetValue<Bit >(frame, indices); } inline float GetFP32 (index_t frame, std::vector<index_t> const & indices) { return GetValue<float >(frame, indices); } inline double GetFP64 (index_t frame, std::vector<index_t> const & indices) { return GetValue<double >(frame, indices); } inline std::int8_t GetINT8 (index_t frame, std::vector<index_t> const & indices) { return GetValue<std::int8_t >(frame, indices); } inline std::int16_t GetINT16 (index_t frame, std::vector<index_t> const & indices) { return GetValue<std::int16_t >(frame, indices); } inline std::int32_t GetINT32 (index_t frame, std::vector<index_t> const & indices) { return GetValue<std::int32_t >(frame, indices); } inline std::int64_t GetINT64 (index_t frame, std::vector<index_t> const & indices) { return GetValue<std::int64_t >(frame, indices); } inline std::uint8_t GetUINT8 (index_t frame, std::vector<index_t> const & indices) { return GetValue<std::uint8_t >(frame, indices); } inline std::uint16_t GetUINT16(index_t frame, std::vector<index_t> const & indices) { return GetValue<std::uint16_t>(frame, indices); } inline std::uint32_t GetUINT32(index_t frame, std::vector<index_t> const & indices) { return GetValue<std::uint32_t>(frame, indices); } inline std::uint64_t GetUINT64(index_t frame, std::vector<index_t> const & indices) { return GetValue<std::uint64_t>(frame, indices); } template<typename Tp> void SetVector(index_t frame, std::vector<Tp> const &data) { BB_ASSERT(data.size() == (size_t)m_node_size); BB_ASSERT(frame >= 0 && frame < m_frame_size); for (index_t node = 0; node < m_node_size; ++node) { SetValue<Tp>(frame, node, data[node]); } } template<typename Tp> void SetVector(std::vector< std::vector<Tp> > const &data) { BB_ASSERT(data.size() == (size_t)m_frame_size); for (index_t frame = 0; frame < m_frame_size; ++frame) { BB_ASSERT(data[frame].size() == (size_t)m_node_size); for (index_t node = 0; node < m_node_size; ++node) { SetValue<Tp>(frame, node, data[frame][node]); } } } template<typename Tp> void SetVector(std::vector< std::vector<Tp> > const &data, index_t offset) { BB_ASSERT(GetType() == DataType<Tp>::type); BB_ASSERT(offset + m_frame_size <= (index_t)data.size() ); auto ptr = Lock<Tp>(); for (index_t frame = 0; frame < m_frame_size; ++frame) { BB_ASSERT(data[(size_t)frame].size() == (size_t)m_node_size); for (index_t node = 0; node < m_node_size; ++node) { ptr.Set(frame, node, data[(size_t)(frame + offset)][(size_t)node]); } } } template<typename Tp> std::vector<Tp> GetVector(index_t frame) const { BB_ASSERT(frame >= 0 && frame < 
m_frame_size);
        std::vector<Tp> data(m_node_size);
        for (index_t node = 0; node < m_node_size; ++node) {
            data[node] = GetValue<Tp>(frame, node);
        }
        return data;
    }

    template<typename BufType, typename VecType=float>
    void SetData_(std::vector< std::vector<VecType> > const &data, index_t offset=0)
    {
        BB_ASSERT(GetType() == DataType<BufType>::type);
        BB_ASSERT(offset + (index_t)data.size() <= m_frame_size);
        index_t size = (index_t)data.size();
        if ( size + offset > m_frame_size ) { size = m_frame_size - offset; }
        auto ptr = Lock<BufType>();
        for (index_t i = 0; i < size; ++i) {
            index_t frame = i + offset;
            BB_ASSERT(data[i].size() == (size_t)m_node_size);
            for (index_t node = 0; node < m_node_size; ++node) {
                ptr.Set(frame, node, (BufType)data[i][node]);
            }
        }
    }

    template<typename BufType, typename VecType=float>
    std::vector< std::vector<VecType> > GetData_(index_t size=0, index_t offset=0)
    {
        if ( size <= 0 ) { size = m_frame_size - offset; }
        BB_ASSERT(GetType() == DataType<BufType>::type);
        BB_ASSERT(offset + size <= m_frame_size);
        std::vector< std::vector<VecType> > data(size);
        auto ptr = LockConst<BufType>();
        for (index_t i = 0; i < size; ++i) {
            data[i].resize(m_node_size);
            index_t frame = i + offset;
            for (index_t node = 0; node < m_node_size; ++node) {
                data[i][node] = (VecType)ptr.Get(frame, node);
            }
        }
        return data;
    }

    template<typename VecType=float>
    void SetData(std::vector< std::vector<VecType> > const &data, index_t offset=0)
    {
        switch (GetType()) {
        case BB_TYPE_BIT:    SetData_<bb::Bit,       VecType>(data, offset); break;
        case BB_TYPE_FP32:   SetData_<float,         VecType>(data, offset); break;
        case BB_TYPE_FP64:   SetData_<double,        VecType>(data, offset); break;
        case BB_TYPE_INT8:   SetData_<std::int8_t,   VecType>(data, offset); break;
        case BB_TYPE_INT16:  SetData_<std::int16_t,  VecType>(data, offset); break;
        case BB_TYPE_INT32:  SetData_<std::int32_t,  VecType>(data, offset); break;
        case BB_TYPE_INT64:  SetData_<std::int64_t,  VecType>(data, offset); break;
        case BB_TYPE_UINT8:  SetData_<std::uint8_t,  VecType>(data, offset); break;
        case BB_TYPE_UINT16: SetData_<std::uint16_t, VecType>(data, offset); break;
        case BB_TYPE_UINT32: SetData_<std::uint32_t, VecType>(data, offset); break;
        case BB_TYPE_UINT64: SetData_<std::uint64_t, VecType>(data, offset); break;
        default: BB_ASSERT(0);
        }
    }

    template<typename VecType=float>
    std::vector< std::vector<VecType> > GetData(index_t size=0, index_t offset=0)
    {
        switch (GetType()) {
        case BB_TYPE_BIT:    return GetData_<bb::Bit,       VecType>(size, offset);
        case BB_TYPE_FP32:   return GetData_<float,         VecType>(size, offset);
        case BB_TYPE_FP64:   return GetData_<double,        VecType>(size, offset);
        case BB_TYPE_INT8:   return GetData_<std::int8_t,   VecType>(size, offset);
        case BB_TYPE_INT16:  return GetData_<std::int16_t,  VecType>(size, offset);
        case BB_TYPE_INT32:  return GetData_<std::int32_t,  VecType>(size, offset);
        case BB_TYPE_INT64:  return GetData_<std::int64_t,  VecType>(size, offset);
        case BB_TYPE_UINT8:  return GetData_<std::uint8_t,  VecType>(size, offset);
        case BB_TYPE_UINT16: return GetData_<std::uint16_t, VecType>(size, offset);
        case BB_TYPE_UINT32: return GetData_<std::uint32_t, VecType>(size, offset);
        case BB_TYPE_UINT64: return GetData_<std::uint64_t, VecType>(size, offset);
        default: BB_ASSERT(0);
        }
        return std::vector< std::vector<VecType> >();
    }

    // tensor assignment
public:
/*
    template<typename Tp>
    void SetTensor(index_t frame, Tensor const &tensor)
    {
        BB_ASSERT(m_data_type == DataType<Tp>::type);
        BB_ASSERT(tensor.GetType() == DataType<Tp>::type);
        BB_ASSERT(tensor.GetShape() == m_tensor.GetShape());
        int dim = tensor.GetDim();
        indices_t indices(tensor.GetDim(), 0);
        for ( ; ; ) {
            // m_tensor.template At<Tp>(indices) = tensor.template At<Tp>(indices);
            int i = 0;
            for ( ; ; ) {
                indices[i]++;
                if ( indices[i] < m_node_shape[i] ) { break; }
                indices[i] = 0;
                i++;
                if (i >= dim) { return; }
            }
        }
    }
*/

    // extract a sub-range of frames
    FrameBuffer FrameRange(index_t size, index_t offset=0)
    {
        BB_ASSERT(offset >= 0 && offset < m_frame_size);
        BB_ASSERT(size >= 0 && size <= m_frame_size - offset);  // "<" would forbid taking all remaining frames
        FrameBuffer buf(size, m_node_shape, m_data_type);
        auto src_ptr = m_tensor.LockMemoryConst();
        auto dst_ptr = buf.m_tensor.LockMemory(true);
        auto src_addr = (std::int8_t const *)src_ptr.GetAddr();
        auto dst_addr = (std::int8_t *)dst_ptr.GetAddr();
        if (m_data_type == BB_TYPE_BIT && (offset % 8) != 0 ) {
            #pragma omp parallel for
            for (index_t node = 0; node < m_node_size; ++node) {
                for (index_t frame = 0; frame < size; ++frame) {
                    auto val = DataType_Read<Bit>(src_addr + m_frame_stride * node, frame + offset);
                    // the destination buffer has its own (smaller) stride
                    DataType_Write<Bit>(dst_addr + buf.m_frame_stride * node, frame, val);
                }
            }
        }
        else {
            int     unit        = DataType_GetBitSize(m_data_type);
            index_t byte_offset = (offset * unit + 7) / 8;
            index_t byte_size   = (size   * unit + 7) / 8;
            #pragma omp parallel for
            for (index_t node = 0; node < m_node_size; ++node) {
                memcpy(dst_addr + buf.m_frame_stride * node,
                       src_addr + m_frame_stride * node + byte_offset,
                       (std::size_t)byte_size);
            }
        }
        return buf;
    }

    FrameBuffer Range(index_t size, index_t offset=0)
    {
        BB_ASSERT(offset >= 0 && offset < m_node_size);
        BB_ASSERT(size >= 0 && size <= m_node_size - offset);
        FrameBuffer buf(m_frame_size, {size}, m_data_type);
        auto src_ptr = m_tensor.LockMemoryConst();
        auto dst_ptr = buf.m_tensor.LockMemory(true);
        auto src_addr = (std::int8_t const *)src_ptr.GetAddr();
        auto dst_addr = (std::int8_t *)dst_ptr.GetAddr();
        index_t stride_size = GetFrameStride();
        memcpy(dst_addr, src_addr + (m_frame_stride * offset), (std::size_t)(stride_size * size));
        return buf;
    }

    FrameBuffer Concatenate(FrameBuffer const& buf)
    {
        BB_ASSERT(buf.GetType() == GetType());
        BB_ASSERT(buf.m_frame_size == m_frame_size);
        BB_ASSERT(buf.m_frame_stride == m_frame_stride);
        FrameBuffer dst_buf(m_frame_size, {m_node_size + buf.m_node_size}, m_data_type);
        auto dst_ptr  = dst_buf.m_tensor.LockMemory(true);
        auto dst_addr = (std::int8_t *)dst_ptr.GetAddr();
        {
            auto src0_ptr  = m_tensor.LockMemoryConst();
            auto src0_addr = (std::int8_t const *)src0_ptr.GetAddr();
            memcpy(dst_addr, src0_addr, (std::size_t)(m_frame_stride * m_node_size));
            dst_addr += m_frame_stride * m_node_size;
        }
        {
            auto src1_ptr  = buf.m_tensor.LockMemoryConst();
            auto src1_addr = (std::int8_t const *)src1_ptr.GetAddr();
            memcpy(dst_addr, src1_addr, (std::size_t)(buf.m_frame_stride * buf.m_node_size));
        }
        return dst_buf;
    }

    // -------------------------------------
    //  arithmetic operations
    // -------------------------------------

protected:
    template<typename T>
    bool _IsNan(void)
    {
#ifdef BB_WITH_CUDA
        if ( this->IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            Tensor_<int> result(1);
            {
                auto ptr_buf    = this->LockDeviceMemoryConst();
                auto ptr_result = result.LockDeviceMemory(true);
                bbcu_FrameBuf_IsnNan
                    (
                        (int     *)ptr_result.GetAddr(),
                        (T const *)ptr_buf.GetAddr(),
                        (int      )this->GetNodeSize(),
                        (int      )this->GetFrameSize(),
                        (int      )(this->GetFrameStride() / sizeof(T))
                    );
            }
            {
                auto dst_ptr = result.LockConst();
                return dst_ptr(0) != 0;
            }
        }
#endif
        auto ptr = LockConst<T>();
        for ( index_t node = 0; node < GetNodeSize(); ++node ) {
            for ( index_t frame = 0; frame < GetFrameSize(); ++frame ) {
                if ( std::isnan(ptr.Get(frame, node)) ) {
                    return true;
                }
            }
        }
return false; } template<typename T> double _Min(void) { T value = std::numeric_limits<T>::max(); auto ptr = LockConst<T>(); for ( index_t node = 0; node < GetNodeSize(); ++node ) { for ( index_t frame = 0; frame < GetFrameSize(); ++frame ) { value = std::min(value, ptr.Get(frame, node)); } } return (double)value; } template<typename T> double _MinFp(void) { #ifdef BB_WITH_CUDA if ( this->IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { Tensor_<T> result(1); { auto ptr_buf = this->LockDeviceMemoryConst(); auto ptr_result = result.LockDeviceMemory(true); bbcu_FrameBuf_Min<T> ( (T *)ptr_result.GetAddr(), (T const *)ptr_buf.GetAddr(), (int )this->GetNodeSize(), (int )this->GetFrameSize(), (int )(this->GetFrameStride() / sizeof(T)) ); } { auto dst_ptr = result.LockConst(); return (double)dst_ptr(0); } } #endif return _Min<T>(); } template<typename T> double _Max(void) { T value = std::numeric_limits<T>::lowest(); auto ptr = LockConst<T>(); for ( index_t node = 0; node < GetNodeSize(); ++node ) { for ( index_t frame = 0; frame < GetFrameSize(); ++frame ) { value = std::max(value, ptr.Get(frame, node)); } } return (double)value; } template<typename T> double _MaxFp(void) { #ifdef BB_WITH_CUDA if ( this->IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { Tensor_<T> result(1); { auto ptr_buf = this->LockDeviceMemoryConst(); auto ptr_result = result.LockDeviceMemory(true); bbcu_FrameBuf_Max<T> ( (T *)ptr_result.GetAddr(), (T const *)ptr_buf.GetAddr(), (int )this->GetNodeSize(), (int )this->GetFrameSize(), (int )(this->GetFrameStride() / sizeof(T)) ); } { auto dst_ptr = result.LockConst(); return (double)dst_ptr(0); } } #endif return _Max<T>(); } public: inline FrameBuffer& operator=(double src) { m_tensor = src; return *this; } inline FrameBuffer& operator+=(FrameBuffer src) { m_tensor += src.m_tensor; return *this; } inline FrameBuffer& operator+=(double src) { m_tensor += src; return *this; } inline FrameBuffer& operator-=(FrameBuffer src) { m_tensor -= src.m_tensor; return *this; } inline FrameBuffer& operator-=(double src) { m_tensor -= src; return *this; } inline FrameBuffer& operator*=(FrameBuffer src) { m_tensor *= src.m_tensor; return *this; } inline FrameBuffer& operator*=(double src) { m_tensor *= src; return *this; } inline FrameBuffer& operator/=(FrameBuffer src) { m_tensor /= src.m_tensor; return *this; } inline FrameBuffer& operator/=(double src) { m_tensor /= src; return *this; } bool IsNan(void) { switch ( GetType() ) { case BB_TYPE_FP32: return _IsNan<float>(); case BB_TYPE_FP64: return _IsNan<double>(); } return false; } double Min(void) { switch ( GetType() ) { case BB_TYPE_FP32: return _MinFp<float >(); case BB_TYPE_FP64: return _MinFp<double>(); case BB_TYPE_BIT: return _Min<Bit >(); case BB_TYPE_INT8: return _Min<int8_t >(); case BB_TYPE_INT16: return _Min<int16_t >(); case BB_TYPE_INT32: return _Min<int32_t >(); case BB_TYPE_INT64: return _Min<int64_t >(); case BB_TYPE_UINT8: return _Min<uint8_t >(); case BB_TYPE_UINT16: return _Min<uint16_t>(); case BB_TYPE_UINT32: return _Min<uint32_t>(); case BB_TYPE_UINT64: return _Min<uint64_t>(); default: BB_ASSERT(0); } return 0; } double Max(void) { switch ( GetType() ) { case BB_TYPE_FP32: return _MaxFp<float >(); case BB_TYPE_FP64: return _MaxFp<double>(); case BB_TYPE_BIT: return _Max<Bit >(); case BB_TYPE_INT8: return _Max<int8_t >(); case BB_TYPE_INT16: return _Max<int16_t >(); case BB_TYPE_INT32: return _Max<int32_t >(); case BB_TYPE_INT64: return _Max<int64_t >(); case BB_TYPE_UINT8: return _Max<uint8_t >(); case 
BB_TYPE_UINT16: return _Max<uint16_t>(); case BB_TYPE_UINT32: return _Max<uint32_t>(); case BB_TYPE_UINT64: return _Max<uint64_t>(); default: BB_ASSERT(0); } return 0; } FrameBuffer Sqrt(void) { FrameBuffer dst(GetFrameSize(), GetShape(), GetType(), IsHostOnly()); dst.m_tensor = m_tensor.Sqrt(); return dst; } FrameBuffer Exp(void) { FrameBuffer dst(GetFrameSize(), GetShape(), GetType(), IsHostOnly()); dst.m_tensor = m_tensor.Exp(); return dst; } double Sum(void) { return m_tensor.Sum(); } double Norm(void) { return std::sqrt((*this * *this).Sum()); } friend FrameBuffer operator+(FrameBuffer const &src0, FrameBuffer const &src1); friend FrameBuffer operator+(FrameBuffer const &src0, double src1); friend FrameBuffer operator+(double src0, FrameBuffer const &src1); friend FrameBuffer operator-(FrameBuffer const &src0, FrameBuffer const &src1); friend FrameBuffer operator-(FrameBuffer const &src0, double src1); friend FrameBuffer operator-(double src0, FrameBuffer const &src1); friend FrameBuffer operator*(FrameBuffer const &src0, FrameBuffer const &src1); friend FrameBuffer operator*(FrameBuffer const &src0, double src1); friend FrameBuffer operator*(double src0, FrameBuffer const &src1); friend FrameBuffer operator/(FrameBuffer const &src0, FrameBuffer const &src1); friend FrameBuffer operator/(FrameBuffer const &src0, double src1); friend FrameBuffer operator/(double src0, FrameBuffer const &src1); friend FrameBuffer Sqrt(FrameBuffer const &src); friend FrameBuffer Exp(FrameBuffer const &src); }; inline FrameBuffer operator+(FrameBuffer const &src0, FrameBuffer const &src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor + src1.m_tensor; return dst; } inline FrameBuffer operator+(FrameBuffer const &src0, double src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor + src1; return dst; } inline FrameBuffer operator+(double src0, FrameBuffer const &src1) { FrameBuffer dst(src1.GetFrameSize(), src1.GetShape(), src1.GetType(), src1.IsHostOnly()); dst.m_tensor = src0 + src1.m_tensor; return dst; } inline FrameBuffer operator-(FrameBuffer const &src0, FrameBuffer const &src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor - src1.m_tensor; return dst; } inline FrameBuffer operator-(FrameBuffer const &src0, double src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor - src1; return dst; } inline FrameBuffer operator-(double src0, FrameBuffer const &src1) { FrameBuffer dst(src1.GetFrameSize(), src1.GetShape(), src1.GetType(), src1.IsHostOnly()); dst.m_tensor = src0 - src1.m_tensor; return dst; } inline FrameBuffer operator*(FrameBuffer const &src0, FrameBuffer const &src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor * src1.m_tensor; return dst; } inline FrameBuffer operator*(FrameBuffer const &src0, double src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor * src1; return dst; } inline FrameBuffer operator*(double src0, FrameBuffer const &src1) { FrameBuffer dst(src1.GetFrameSize(), src1.GetShape(), src1.GetType(), src1.IsHostOnly()); dst.m_tensor = src0 * src1.m_tensor; return dst; } inline FrameBuffer operator/(FrameBuffer const &src0, FrameBuffer const &src1) { 
FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor / src1.m_tensor; return dst; } inline FrameBuffer operator/(FrameBuffer const &src0, double src1) { FrameBuffer dst(src0.GetFrameSize(), src0.GetShape(), src0.GetType(), src0.IsHostOnly()); dst.m_tensor = src0.m_tensor / src1; return dst; } inline FrameBuffer operator/(double src0, FrameBuffer const &src1) { FrameBuffer dst(src1.GetFrameSize(), src1.GetShape(), src1.GetType(), src1.IsHostOnly()); dst.m_tensor = src0 / src1.m_tensor; return dst; } inline FrameBuffer Sqrt(FrameBuffer const &src) { FrameBuffer dst(src.GetFrameSize(), src.GetShape(), src.GetType(), src.IsHostOnly()); dst.m_tensor = Sqrt(src.m_tensor); return dst; } inline FrameBuffer Exp(FrameBuffer const &src) { FrameBuffer dst(src.GetFrameSize(), src.GetShape(), src.GetType(), src.IsHostOnly()); dst.m_tensor = Exp(src.m_tensor); return dst; } template<typename T> std::ostream &operator<<(std::ostream& os, FrameBufferConstPtr_<T const, FrameBuffer const, Memory::ConstPtr> ptr) { os << "[\n"; for ( index_t frame = 0; frame < ptr.GetFrameBuffer().GetFrameSize(); ++frame ) { os << " ["; for ( index_t node = 0; node < ptr.GetFrameBuffer().GetNodeSize(); ++node ) { os << ptr.Get(frame, node) << ", "; if ( node % 16 == 15 ) { os << "\n"; } } os << "]\n"; } os << "]\n"; return os; } inline std::ostream& operator<<(std::ostream& os, FrameBuffer const &buf) { switch (buf.GetType()) { case BB_TYPE_BIT: return os << buf.LockConst<Bit >(); case BB_TYPE_FP32: return os << buf.LockConst<float >(); case BB_TYPE_FP64: return os << buf.LockConst<double >(); case BB_TYPE_INT8: return os << buf.LockConst<int8_t >(); case BB_TYPE_INT16: return os << buf.LockConst<int16_t >(); case BB_TYPE_INT32: return os << buf.LockConst<int32_t >(); case BB_TYPE_INT64: return os << buf.LockConst<int64_t >(); case BB_TYPE_UINT8: return os << buf.LockConst<uint8_t >(); case BB_TYPE_UINT16: return os << buf.LockConst<uint16_t>(); case BB_TYPE_UINT32: return os << buf.LockConst<uint32_t>(); case BB_TYPE_UINT64: return os << buf.LockConst<uint64_t>(); default: BB_ASSERT(0); } return os; } }
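// --- Editor's usage sketch (not part of the original header) -----------------
// Minimal illustration of the accessors and element-wise operators defined
// above. It assumes the FrameBuffer(frame_size, node_shape, data_type)
// constructor the class itself uses in FrameRange()/Range() is publicly
// visible, and that the header is reachable as "bb/FrameBuffer.h" (both are
// assumptions; adjust to your tree).
#include "bb/FrameBuffer.h"   // hypothetical include path
#include <cassert>

static void frame_buffer_usage(void)
{
    bb::FrameBuffer buf(2, {3}, BB_TYPE_FP32);    // 2 frames x 3 nodes of fp32
    buf.SetFP32(0, 0, 1.5f);                      // runtime-dispatched write
    assert(buf.GetFP32(0, 0) == 1.5f);            // runtime-dispatched read
    {
        auto ptr = buf.Lock<float>();             // typed lock: no per-call type switch
        ptr.Set(1, 2, 4.0f);
    }
    bb::FrameBuffer sum = buf + 1.0;              // element-wise operator
    assert(sum.GetFP32(0, 0) == 2.5f);
}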
gemv_c_dia_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { #ifdef COMPLEX const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * n); memset(tmp[i], 0, sizeof(ALPHA_Number) * n); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; const ALPHA_INT row_start = alpha_max(0, -dis); const ALPHA_INT col_start = alpha_max(0, dis); const ALPHA_INT nnz = (m - row_start)<(n - col_start)?(m - row_start):(n - col_start); const ALPHA_INT start = i * A->lval; for (ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul_3c(v, alpha, A->values[start + row_start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < n; ++i) { alpha_mul(y[i], y[i], beta); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(int i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { #ifdef COMPLEX return ONAME_omp(alpha, A, x, beta, y); #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
prior_box_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { constexpr int kPriorBoxFLOAT = 1; constexpr int kPriorBoxDOUBLE = 2; inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior, bool flip, std::vector<float>* output_aspect_ratior) { constexpr float epsilon = 1e-6; output_aspect_ratior->clear(); output_aspect_ratior->push_back(1.0f); for (size_t i = 0; i < input_aspect_ratior.size(); ++i) { float ar = input_aspect_ratior[i]; bool already_exist = false; for (size_t j = 0; j < output_aspect_ratior->size(); ++j) { if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) { already_exist = true; break; } } if (!already_exist) { output_aspect_ratior->push_back(ar); if (flip) { output_aspect_ratior->push_back(1.0f / ar); } } } } template <typename T, typename K> class PriorBoxOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes"); auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes"); auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto flip = ctx.Attr<bool>("flip"); auto clip = ctx.Attr<bool>("clip"); auto min_max_aspect_ratios_order = ctx.Attr<bool>("min_max_aspect_ratios_order"); std::vector<float> aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); K step_w = static_cast<K>(ctx.Attr<float>("step_w")); K step_h = static_cast<K>(ctx.Attr<float>("step_h")); K offset = static_cast<K>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; K step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<K>(img_width) / feature_width; step_height = static_cast<K>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = aspect_ratios.size() * min_sizes.size(); if (max_sizes.size() > 0) { num_priors += max_sizes.size(); } boxes->mutable_data<K>(ctx.GetPlace()); vars->mutable_data<K>(ctx.GetPlace()); K* b_t = boxes->data<K>(); for (int h = 0; h < feature_height; ++h) { for (int w = 0; w < feature_width; ++w) { K center_x = (w + offset) * step_width; K center_y = (h + offset) * step_height; K box_width, box_height; for (size_t s = 0; s < min_sizes.size(); ++s) { auto min_size = min_sizes[s]; if (min_max_aspect_ratios_order) { box_width = box_height = min_size / 
2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; if (max_sizes.size() > 0) { auto max_size = max_sizes[s]; // square prior with size sqrt(minSize * maxSize) box_width = box_height = sqrt(min_size * max_size) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } // priors with different aspect ratios for (size_t r = 0; r < aspect_ratios.size(); ++r) { float ar = aspect_ratios[r]; if (fabs(ar - 1.) < 1e-6) { continue; } box_width = min_size * sqrt(ar) / 2.; box_height = min_size / sqrt(ar) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } } else { // priors with different aspect ratios for (size_t r = 0; r < aspect_ratios.size(); ++r) { float ar = aspect_ratios[r]; box_width = min_size * sqrt(ar) / 2.; box_height = min_size / sqrt(ar) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } if (max_sizes.size() > 0) { auto max_size = max_sizes[s]; // square prior with size sqrt(minSize * maxSize) box_width = box_height = sqrt(min_size * max_size) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } } } } } if (clip) { K* dt = boxes->data<K>(); std::transform(dt, dt + boxes->numel(), dt, [](K v) -> K { return std::min<K>(std::max<K>(v, 0.), 1.); }); } framework::Tensor var_t; var_t.mutable_data<K>( framework::make_ddim({1, static_cast<int>(variances.size())}), ctx.GetPlace()); auto var_et = framework::EigenTensor<K, 2>::From(var_t); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (size_t i = 0; i < variances.size(); ++i) { var_et(0, i) = variances[i]; } int box_num = feature_height * feature_width * num_priors; auto var_dim = vars->dims(); vars->Resize({box_num, static_cast<int>(variances.size())}); auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int i = 0; i < box_num; ++i) { for (size_t j = 0; j < variances.size(); ++j) { e_vars(i, j) = variances[j]; } } vars->Resize(var_dim); } }; // namespace operators } // namespace operators } // namespace paddle
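// --- Editor's sketch (not part of the operator) -------------------------------
// Standalone illustration of the aspect-ratio expansion used above: ratio 1.0
// is always kept, near-duplicates (within 1e-6) are dropped, and with
// flip=true each ratio r also contributes 1/r. The per-location prior count
// then follows the same formula the kernel uses:
// num_priors = aspect_ratios.size() * min_sizes.size() + max_sizes.size().
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> input = {2.0f, 3.0f};      // example attribute values
    std::vector<float> out   = {1.0f};
    for (float ar : input) {
        bool exists = false;
        for (float o : out) exists = exists || std::fabs(ar - o) < 1e-6f;
        if (!exists) {
            out.push_back(ar);
            out.push_back(1.0f / ar);             // the flip=true branch
        }
    }
    std::size_t min_sizes = 2, max_sizes = 2;
    std::size_t num_priors = out.size() * min_sizes + max_sizes;
    std::printf("ratios=%zu, priors per location=%zu\n", out.size(), num_priors);
    return 0;
}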
md5test.c
/***
 * How to compile:
 * GCC 4.6.3: mpicc md5test.c -o md5test -fopenmp -lcrypto -lssl -std=c99
 * GCC 6.3.0: mpicc md5test.c -o md5test -fopenmp -lcrypto -lssl
 *
 * At most 5 nodes
 * - 1 master
 *   - The master only sends the slaves a message saying "look for this line
 *     and tell me whether you found it or not".
 *   - The slave will therefore under-utilise the machine it runs on.
 * - 5 slaves (6 books per slave)
 *   - It cannot be 4 slaves, because there are 30 books.
 *   - That would mean 7.5 books per slave, which is not possible.
 **/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <openssl/md5.h>
#include <mpi.h>
#include <omp.h>

#define NUMBER_OF_BOOKS 30
#define NUMBER_OF_SLAVES 5
#define NUMBER_OF_BOOKS_BY_SLAVE 6
#define NUMBER_OF_THREADS 8
#define STOP_WHEN_FOUND 1

MPI_Status status; // structure that stores the status of each receive

typedef struct Line1 {
    char* str;
    unsigned char* md5;
} Line;

typedef struct Book1 {
    int number;
    size_t lines_len;
    Line* lines;
} Book;

char* file_to_str(char* filepath, char* filename) {
    // https://stackoverflow.com/questions/3747086/reading-the-whole-text-file-into-a-char-array-in-c
    FILE* fp = fopen(filepath, "rb");
    long lSize;
    char* buffer;
    if (!fp) {
        perror(filename);
        exit(1);
    }
    fseek(fp, 0L, SEEK_END);
    lSize = ftell(fp);
    rewind(fp);
    /* allocate memory for entire content */
    buffer = calloc(1, lSize + 1);
    if (!buffer) {
        fclose(fp);
        fputs("memory alloc fails", stderr);
        exit(1);
    }
    /* copy the file into the buffer */
    if (1 != fread(buffer, lSize, 1, fp)) {
        fclose(fp);
        free(buffer);
        fputs("entire read fails", stderr);
        exit(1);
    }
    fclose(fp);
    return buffer;
}

void md5_print(unsigned char* result) {
    int i;
    for (i = 0; i < MD5_DIGEST_LENGTH; i++)
        printf("%02x", *(result + i));
    printf("\n");
}

unsigned char* str_to_md5(char* str) {
    unsigned char* result = (unsigned char*) malloc(MD5_DIGEST_LENGTH * sizeof(unsigned char));
    MD5((const unsigned char*) str, strlen(str), result); /* cast avoids the pointer-signedness warning */
    return result;
}

char** str_split(char* a_str, const char a_delim, size_t* len) {
    // https://stackoverflow.com/questions/9210528/split-string-with-delimiters-in-c
    char** result = 0;
    size_t count = 0;
    char* tmp = a_str;
    char* last_comma = 0;
    char delim[2];
    delim[0] = a_delim;
    delim[1] = 0;
    /* Count how many elements will be extracted. */
    while (*tmp) {
        if (a_delim == *tmp) {
            count++;
            last_comma = tmp;
        }
        tmp++;
    }
    /* Add space for trailing token. */
    count += last_comma < (a_str + strlen(a_str) - 1);
    /* Add space for terminating null string so caller
       knows where the list of returned strings ends. */
    count++;
    result = malloc(sizeof(char*) * count);
    if (result) {
        size_t idx = 0;
        *len = 0;
        char* token = strtok(a_str, delim);
        while (token) {
            *(result + idx++) = strdup(token);
            if (strlen(token) > 1) (*len)++;
            token = strtok(0, delim);
        }
        *(result + idx) = 0;
    }
    return result;
}

char* load_book_i(int i) {
    char filepath[25];
    char filename[8];
    sprintf(filepath, "plain_text_books/%d.txt", i);
    sprintf(filename, "%d.txt", i);
    char* whole_text = file_to_str(filepath, filename);
    return whole_text;
}

void books_print(Book* books) {
    int i;
    for (i = 0; i < NUMBER_OF_BOOKS; i++) {
        printf("Book %d; total lines %lu\n", books[i].number, books[i].lines_len);
        int j;
        for (j = 0; j < books[i].lines_len; j++) {
            Line* line = &(books[i].lines[j]);
            printf("Book %d; line %d of %lu: %s\n", books[i].number, j + 1, books[i].lines_len, line->str);
            md5_print(line->md5);
        }
    }
}

bool md5_equals(unsigned char* md5_a, unsigned char* md5_b) {
    int i;
    for (i = 0; i < MD5_DIGEST_LENGTH; i++)
        if (*(md5_a + i) != *(md5_b + i))
            return false;
    return true;
}

int find_line_in_book(Book* books, unsigned char* md5) {
    int i;
    int book_number = -1;
    #pragma omp parallel shared(md5, book_number)
    #pragma omp for schedule (dynamic)
    for (i = 0; i < NUMBER_OF_BOOKS_BY_SLAVE; i++) {
        int j;
        for (j = 0; j < books[i].lines_len; j++) {
            Line* line = &(books[i].lines[j]);
            if (book_number != -1) {
                if (STOP_WHEN_FOUND == 1) {
                    break;
                } else {
                    continue;
                }
            } else if (md5_equals(md5, line->md5)) {
                /* store the book number directly; the previous "atomic +="
                   on the -1 sentinel reported number-1 to the master */
                #pragma omp critical
                book_number = books[i].number;
            }
        }
    }
    return book_number;
}

int find_line_in_books(Book* books, char* line_str) {
    unsigned char* md5 = str_to_md5(line_str);
    int book_number = 0;
    int i;
    for (i = 1; i <= NUMBER_OF_SLAVES; i++) {
        MPI_Send(md5, MD5_DIGEST_LENGTH, MPI_UNSIGNED_CHAR, i, 1, MPI_COMM_WORLD);
    }
    free(md5);
    for (i = 1; i <= NUMBER_OF_SLAVES; i++) {
        int found = 0;
        MPI_Recv(&found, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status);
        if (found > 0) {
            book_number = found; /* keep a hit even when a later slave reports a miss */
            //printf("\nSlave[%d]: found it in book %d", i, found);
        } else {
            //printf("\nSlave[%d]: did not find it", i);
        }
    }
    return book_number;
}

void find_all_lines_in_books(Book* books) {
    int i;
    for (i = 0; i < NUMBER_OF_BOOKS; i++) {
        printf("\rBook %d of %d", i + 1, NUMBER_OF_BOOKS);
        fflush(stdout);
        int j;
        for (j = 0; j < books[i].lines_len; j++) {
            Line* line = &(books[i].lines[j]);
            char* line_str = line->str;
            int book_number = find_line_in_books(books, line_str);
            (void) book_number; /* every line is expected to be found in its own book */
        }
    }
    printf("\n");
}

size_t number_of_lines_in_books(Book* books) {
    size_t all_lines_len = 0;
    int i;
    for (i = 0; i < NUMBER_OF_BOOKS; i++) {
        all_lines_len += books[i].lines_len;
    }
    return all_lines_len;
}

void free_books(Book* books, int len) {
    int i;
    for (i = 0; i < len; i++) {
        int j;
        for (j = 0; j < books[i].lines_len; j++) {
            Line* line = &(books[i].lines[j]);
            free(line->md5);
            free(line->str);
        }
        // free(books[i].lines); // TODO: Freeing all lines structs not working
    }
}

void update_book(Book* book, char* whole_text) {
    size_t len;
    char** lines = str_split(whole_text, '\n', &len);
    book->lines_len = len;
    book->lines = (Line*) malloc(len * sizeof(Line));
    if (lines) {
        int count = 0;
        int j;
        for (j = 0; *(lines + j); j++) {
            if (strlen(*(lines + j)) > 1) {
                Line* line = &(book->lines[count++]);
                line->str = *(lines + j);
                line->md5 = str_to_md5(*(lines + j));
            } else {
                free(*(lines + j)); /* tokens that are too short are not kept */
            }
        }
        free(lines);
    }
}

int main(int argc, char** argv) {
    int my_rank; // identifier of this process
    int proc_n;  // number of processes launched by the user on the command line (np)
    MPI_Init(&argc, &argv);                  // initialises MPI; all the parallel code is below
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // gets the rank of the current process
    MPI_Comm_size(MPI_COMM_WORLD, &proc_n);  // gets the total number of processes
    if (proc_n != NUMBER_OF_SLAVES + 1) {
        printf("The number of processes (%d) must be equal to the number of slaves (%d) + 1 (the master)!\n", proc_n, NUMBER_OF_SLAVES);
        MPI_Finalize();
        return EXIT_FAILURE;
    }
    if (my_rank == 0) { // the master's role
        Book books[NUMBER_OF_BOOKS];
        int slave;
        int i = 1;
        for (slave = 1; slave <= NUMBER_OF_SLAVES; slave++) {
            int cont;
            for (cont = 1; cont <= NUMBER_OF_BOOKS_BY_SLAVE; cont++) {
                books[i - 1].number = i;
                char* whole_text = load_book_i(i);
                size_t len = strlen(whole_text);
                MPI_Send(&i, 1, MPI_INT, slave, 1, MPI_COMM_WORLD);
                MPI_Send(&len, 1, MPI_UNSIGNED_LONG, slave, 1, MPI_COMM_WORLD);
                MPI_Send(whole_text, len, MPI_CHAR, slave, 1, MPI_COMM_WORLD);
                update_book(&(books[i - 1]), whole_text);
                i++;
                free(whole_text);
            }
        }
        size_t all_lines_len = number_of_lines_in_books(books);
        for (slave = 1; slave <= NUMBER_OF_SLAVES; slave++) {
            MPI_Send(&all_lines_len, 1, MPI_UNSIGNED_LONG, slave, 1, MPI_COMM_WORLD);
        }
        // books_print(books);
        double starttime = MPI_Wtime();
        find_all_lines_in_books(books);
        double stoptime = MPI_Wtime();
        double executiontime = stoptime - starttime;
        printf("Execution time: %.2f s\n", executiontime);
        free_books(books, NUMBER_OF_BOOKS);
    } else { // the slave's role
        omp_set_num_threads(NUMBER_OF_THREADS);
        Book books[NUMBER_OF_BOOKS_BY_SLAVE];
        int i;
        for (i = 1; i <= NUMBER_OF_BOOKS_BY_SLAVE; i++) {
            int book_number;
            MPI_Recv(&book_number, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
            size_t len;
            MPI_Recv(&len, 1, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD, &status);
            char* whole_text = (char*) malloc((1 + len) * sizeof(char));
            *(whole_text + len) = '\0';
            MPI_Recv(whole_text, len, MPI_CHAR, 0, 1, MPI_COMM_WORLD, &status);
            books[i - 1].number = book_number;
            update_book(&(books[i - 1]), whole_text);
            free(whole_text);
        }
        size_t all_lines_len;
        MPI_Recv(&all_lines_len, 1, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD, &status);
        size_t request_number;
        for (request_number = 0; request_number < all_lines_len; request_number++) {
            unsigned char md5[MD5_DIGEST_LENGTH];
            MPI_Recv(md5, MD5_DIGEST_LENGTH, MPI_UNSIGNED_CHAR, 0, 1, MPI_COMM_WORLD, &status);
            int found = find_line_in_book(books, md5); /* arrays decay to the pointers the callees expect */
            MPI_Send(&found, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }
        free_books(books, NUMBER_OF_BOOKS_BY_SLAVE);
    }
    MPI_Finalize();
    return EXIT_SUCCESS;
}
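/* --- Editor's sketch (not part of the program above) --------------------------
 * Hashing a single line with OpenSSL's one-shot MD5(), as str_to_md5() does.
 * MD5() is deprecated in OpenSSL 3.x in favour of the EVP interface but still
 * links with -lcrypto. Compile: cc md5_sketch.c -lcrypto */
#include <openssl/md5.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *line = "Call me Ishmael.";          /* example line */
    unsigned char digest[MD5_DIGEST_LENGTH];
    MD5((const unsigned char *)line, strlen(line), digest);
    for (int i = 0; i < MD5_DIGEST_LENGTH; i++)     /* print hex digest */
        printf("%02x", digest[i]);
    putchar('\n');
    return 0;
}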
fig4.43-schedule-clause.c
/*
   DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.

   Copyright 2009 Sun Microsystems, Inc. All rights reserved.

   The contents of this file are subject to the terms of the BSD License("BSD")
   (the "License"). You can obtain a copy of the License at:
   http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt

   The BSD License

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

     * Redistribution of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.
     * Redistribution in binary form must reproduce the above copyright notice,
       this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
     * Neither the name of Sun Microsystems, Inc. or the names of contributors
       may be used to endorse or promote products derived from this software
       without specific prior written permission.

   This software is provided "AS IS," without a warranty of any kind. ALL
   EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
   IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
   NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
   ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
   RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
   IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
   OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
   PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
   ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
   BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

   You acknowledge that this software is not designed, licensed or intended for
   use in the design, construction, operation or maintenance of any nuclear
   facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE  1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#endif

int main()
{
    int i, j, n = 9;

#ifdef _OPENMP
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    (void) omp_set_num_threads(4);
#endif

#pragma omp parallel for default(none) schedule(runtime) \
        private(i,j) shared(n)
    for (i = 0; i < n; i++)
    {
        printf("Iteration %d executed by thread %d\n",
               i, omp_get_thread_num());
        for (j = 0; j < i; j++)
            system("sleep 1");
    } /*-- End of parallel for --*/

    return(0);
}
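/* --- Editor's note (not part of the original figure) --------------------------
 * schedule(runtime) defers the scheduling choice to the OMP_SCHEDULE
 * environment variable, e.g. OMP_SCHEDULE="dynamic,2" ./a.out. Since
 * OpenMP 3.0 the same choice can also be made programmatically: */
#ifdef _OPENMP
#include <omp.h>
static void set_runtime_schedule(void)
{
    omp_set_schedule(omp_sched_dynamic, 2);   /* scheduling kind, chunk size */
}
#endif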
GB_unaryop__one_uint8_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_uint8_uint8 // op(A') function: GB_tran__one_uint8_uint8 // C type: uint8_t // A type: uint8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_uint8_uint8 ( uint8_t *Cx, // Cx and Ax may be aliased uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_uint8_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
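/* --- Editor's sketch (not part of the generated file) -------------------------
 * With the macros expanded, GB_unop__one_uint8_uint8 is just a constant fill:
 * the ONE operator ignores aij entirely (GB_GETA and GB_CASTING are empty),
 * so the kernel reduces to the loop below. */
#include <stdint.h>

static void unop_one_uint8(uint8_t *Cx, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = 1 ;   /* GB_OP(z,x): z = 1, independent of Ax [p] */
    }
}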
GB_binop__div_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint16) // A*D function (colscale): GB (_AxD__div_uint16) // D*A function (rowscale): GB (_DxB__div_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint16) // C=scalar+B GB (_bind1st__div_uint16) // C=scalar+B' GB (_bind1st_tran__div_uint16) // C=A+scalar GB (_bind2nd__div_uint16) // C=A'+scalar GB (_bind2nd_tran__div_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ; \ } GrB_Info GB (_bind1st_tran__div_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ; \ } GrB_Info GB (_bind2nd_tran__div_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
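/*
 * A minimal sketch of the divide-by-zero guard behind GB_IDIV_UNSIGNED:
 * the _bind1st/_bind2nd kernels above never apply the raw C `/` operator
 * directly, so a zero divisor cannot trap. The helper name
 * idiv_uint16_safe and the convention that x/0 saturates to UINT16_MAX
 * are illustrative assumptions here, not the literal GraphBLAS
 * definition.
 */

#include <stdint.h>

// Hypothetical stand-in for GB_IDIV_UNSIGNED (x, y, 16): unsigned
// division that never raises SIGFPE. Assumed convention: x/0 == UINT16_MAX.
static inline uint16_t idiv_uint16_safe (uint16_t x, uint16_t y)
{
    if (y == 0) return (UINT16_MAX) ;   // divide-by-zero: saturate
    return (x / y) ;                    // ordinary unsigned division
}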
GB_unop__identity_int8_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int8_bool)
// op(A') function:  GB (_unop_tran__identity_int8_bool)

// C type:   int8_t
// A type:   bool
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    bool aij = Ax [pA] ;            \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ;       \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int8_bool)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
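/*
 * The generated apply kernel above reduces to one simple pattern: a dense
 * loop when A is full, and a bitmap-gated loop when A->b is present. A
 * self-contained toy version of that same pattern (names invented for
 * illustration, no GraphBLAS types):
 */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Toy apply kernel: Cx [p] = (int8_t) Ax [p], skipping entries whose
// bitmap bit Ab [p] is zero. Ab == NULL means A is full (every position
// holds a value), matching the two branches of the kernel above.
static void apply_identity_int8_bool_toy (int8_t *Cx, const bool *Ax,
                                          const int8_t *Ab, size_t anz)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // bitmap: entry absent
        Cx [p] = (int8_t) Ax [p] ;              // cast bool -> int8_t
    }
}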
close_manual.c
// RUN: %libomptarget-compile-run-and-check-generic

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);
extern void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
                                    void **args_base, void **args,
                                    int64_t *arg_sizes, int64_t *arg_types);
extern void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
                                  void **args_base, void **args,
                                  int64_t *arg_sizes, int64_t *arg_types);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

#pragma omp requires unified_shared_memory

#define N 1024

int main(int argc, char *argv[]) {
  int fails;
  void *host_alloc = 0, *device_alloc = 0;
  int *a = (int *)malloc(N * sizeof(int));

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  // Init
  for (int i = 0; i < N; ++i) {
    a[i] = 10;
  }
  host_alloc = &a[0];

  // Dummy target region that ensures the runtime library is loaded when
  // the target data begin/end functions are manually called below.
#pragma omp target
  {}

  // Manual calls
  int device_id = omp_get_default_device();
  int arg_num = 1;
  void **args_base = (void **)&a;
  void **args = (void **)&a;
  int64_t arg_sizes[arg_num];
  arg_sizes[0] = sizeof(int) * N;
  int64_t arg_types[arg_num];
  // 0x400 enables the CLOSE map type in the runtime:
  // OMP_TGT_MAPTYPE_CLOSE = 0x400
  // OMP_TGT_MAPTYPE_TO    = 0x001
  arg_types[0] = 0x400 | 0x001;

  device_alloc = host_alloc;

  __tgt_target_data_begin(device_id, arg_num, args_base, args, arg_sizes,
                          arg_types);

#pragma omp target data use_device_ptr(a)
  { device_alloc = a; }

  __tgt_target_data_end(device_id, arg_num, args_base, args, arg_sizes,
                        arg_types);

  // CHECK: a was copied to the device
  if (device_alloc != host_alloc)
    printf("a was copied to the device\n");

  free(a);

  // CHECK: Done!
  printf("Done!\n");

  return 0;
}
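/*
 * The bitmask 0x400 | 0x001 passed above is just two map-type bits OR'd
 * together. Naming the bits first keeps manual __tgt_target_data_begin
 * calls readable; the values come from the test's own comment, while the
 * enum wrapper and helper below are an illustrative sketch only.
 */

#include <stdint.h>

enum {
  OMP_TGT_MAPTYPE_TO    = 0x001, // copy host -> device at region entry
  OMP_TGT_MAPTYPE_CLOSE = 0x400, // force a device copy even under USM
};

// Equivalent to the literal 0x400 | 0x001 (== 0x401) used in the test.
static inline int64_t close_to_maptype(void) {
  return OMP_TGT_MAPTYPE_CLOSE | OMP_TGT_MAPTYPE_TO;
}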
2Dpfold.c
/* minimum free energy RNA secondary structure with basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include <float.h> /* #defines FLT_MAX ... */ #include "utils.h" #include "fold_vars.h" #include "params.h" #include "energy_par.h" #include "loop_energies.h" #include "pair_mat.h" #include "mm.h" #include "2Dpfold.h" /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void pf2D_linear(TwoDpfold_vars *vars); PRIVATE void pf2D_circ(TwoDpfold_vars *vars); PRIVATE void initialize_TwoDpfold_vars(TwoDpfold_vars *vars); PRIVATE void scale_pf_params2(TwoDpfold_vars *vars); PRIVATE void make_ptypes2(TwoDpfold_vars *vars); PRIVATE char *TwoDpfold_pbacktrack_circ( TwoDpfold_vars *vars, int d1, int d2); PRIVATE void backtrack( TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm( TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm1( TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm2( TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int k); PRIVATE void backtrack_qcH( TwoDpfold_vars *vars, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcI( TwoDpfold_vars *vars, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcM( TwoDpfold_vars *vars, char *pstruc, int d1, int d2); PRIVATE void adjustArrayBoundaries( FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real); INLINE PRIVATE void preparePosteriorBoundaries( int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void updatePosteriorBoundaries( int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareBoundaries( int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareArray( FLT_OR_DBL ***array, int min_k, int max_k, int *min_l, int *max_l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC TwoDpfold_vars *get_TwoDpfold_variables(const char *seq, const char *structure1, char *structure2, int circ){ unsigned int size, length; int *index; TwoDpfold_vars *vars; vars = (TwoDpfold_vars *)malloc(sizeof(TwoDpfold_vars)); length = (unsigned int) strlen(seq); vars->sequence = (char *)space(length + 1); strcpy(vars->sequence, seq); vars->seq_length = length; if(vars->seq_length < 1) nrerror("get_TwoDfold_variables: sequence must be longer than 0"); size = ((length + 1) * (length + 2)/2); vars->reference_pt1 = make_pair_table(structure1); vars->reference_pt2 = make_pair_table(structure2); vars->referenceBPs1 = make_referenceBP_array(vars->reference_pt1, TURN); /* matrix containing number of basepairs of reference structure1 in interval [i,j] */ vars->referenceBPs2 = make_referenceBP_array(vars->reference_pt2, TURN); /* matrix containing number of basepairs of reference structure2 in interval [i,j] */ /* compute maximum matching with reference structure 1 disallowed */ vars->mm1 = 
maximumMatchingConstraint(vars->sequence, vars->reference_pt1); /* compute maximum matching with reference structure 2 disallowed */ vars->mm2 = maximumMatchingConstraint(vars->sequence, vars->reference_pt2); vars->bpdist = compute_BPdifferences(vars->reference_pt1, vars->reference_pt2, TURN); vars->dangles = dangles; vars->circ = circ; vars->temperature = temperature; vars->init_temp = temperature; vars->pf_scale = pf_scale; vars->pf_params = NULL; vars->scale = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+1)); vars->ptype = space(sizeof(char) * size); vars->S = NULL; vars->S1 = NULL; vars->jindx = get_indx(length); vars->my_iindx = get_iindx(length); index = vars->my_iindx; /* vars->maxD1 = 0; vars->maxD2 = 0; */ vars->maxD1 = vars->mm1[index[1]-length] + vars->referenceBPs1[index[1]-length]; vars->maxD2 = vars->mm2[index[1]-length] + vars->referenceBPs2[index[1]-length]; /* allocate memory for the pf matrices and min-/max-index helper arrays */ vars->Q = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values = (int **) space(sizeof(int *) * size); vars->l_max_values = (int **) space(sizeof(int *) * size); vars->k_min_values = (int *) space(sizeof(int) * size); vars->k_max_values = (int *) space(sizeof(int) * size); vars->Q_B = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_b = (int **) space(sizeof(int *) * size); vars->l_max_values_b = (int **) space(sizeof(int *) * size); vars->k_min_values_b = (int *) space(sizeof(int) * size); vars->k_max_values_b = (int *) space(sizeof(int) * size); vars->Q_M = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_m = (int **) space(sizeof(int *) * size); vars->l_max_values_m = (int **) space(sizeof(int *) * size); vars->k_min_values_m = (int *) space(sizeof(int) * size); vars->k_max_values_m = (int *) space(sizeof(int) * size); vars->Q_M1 = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_m1 = (int **) space(sizeof(int *) * size); vars->l_max_values_m1 = (int **) space(sizeof(int *) * size); vars->k_min_values_m1 = (int *) space(sizeof(int) * size); vars->k_max_values_m1 = (int *) space(sizeof(int) * size); if(vars->circ){ vars->Q_M2_rem = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * (length + 1)); vars->Q_M2 = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * (length + 1)); vars->l_min_values_m2 = (int **) space(sizeof(int *) * (length + 1)); vars->l_max_values_m2 = (int **) space(sizeof(int *) * (length + 1)); vars->k_min_values_m2 = (int *) space(sizeof(int) * (length + 1)); vars->k_max_values_m2 = (int *) space(sizeof(int) * (length + 1)); } else{ vars->Q_M2_rem = NULL; vars->Q_M2 = NULL; vars->l_min_values_m2 = NULL; vars->l_max_values_m2 = NULL; vars->k_min_values_m2 = NULL; vars->k_max_values_m2 = NULL; } vars->Q_c = NULL; vars->Q_cH = NULL; vars->Q_cI = NULL; vars->Q_cM = NULL; vars->Q_c_rem = 0; vars->Q_cH_rem = 0; vars->Q_cI_rem = 0; vars->Q_cM_rem = 0; vars->Q_rem = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * size); vars->Q_B_rem = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * size); vars->Q_M_rem = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * size); vars->Q_M1_rem = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * size); return vars; } PUBLIC TwoDpfold_vars *get_TwoDpfold_variables_from_MFE(TwoDfold_vars *mfe_vars){ unsigned int i, j, size, length; int cnt1, ij, jij; TwoDpfold_vars *vars; vars = (TwoDpfold_vars *)malloc(sizeof(TwoDpfold_vars)); vars->sequence = strdup(mfe_vars->sequence); length = mfe_vars->seq_length; vars->seq_length = length; size = ((length + 1) * 
(length + 2)/2); vars->reference_pt1 = copy_pair_table(mfe_vars->reference_pt1); vars->reference_pt2 = copy_pair_table(mfe_vars->reference_pt2); vars->referenceBPs1 = make_referenceBP_array(vars->reference_pt1, TURN); vars->referenceBPs2 = make_referenceBP_array(vars->reference_pt2, TURN); /* compute maximum matching with reference structure 1 disallowed */ vars->mm1 = maximumMatchingConstraint(vars->sequence, vars->reference_pt1); /* compute maximum matching with reference structure 2 disallowed */ vars->mm2 = maximumMatchingConstraint(vars->sequence, vars->reference_pt2); vars->bpdist = compute_BPdifferences(vars->reference_pt1, vars->reference_pt2, TURN); vars->dangles = mfe_vars->dangles; vars->circ = mfe_vars->circ; vars->temperature = mfe_vars->temperature; vars->init_temp = mfe_vars->temperature; vars->pf_scale = pf_scale; vars->scale = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+1)); vars->ptype = space(sizeof(char) * size); vars->S = NULL; vars->S1 = NULL; vars->pf_params = NULL; vars->maxD1 = mfe_vars->maxD1; vars->maxD2 = mfe_vars->maxD2; vars->jindx = get_indx(vars->seq_length); vars->my_iindx = get_iindx(vars->seq_length); /* allocate memory for the pf matrices and min-/max-index helper arrays */ vars->Q = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values = (int **) space(sizeof(int *) * size); vars->l_max_values = (int **) space(sizeof(int *) * size); vars->k_min_values = (int *) space(sizeof(int) * size); vars->k_max_values = (int *) space(sizeof(int) * size); vars->Q_B = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_b = (int **) space(sizeof(int *) * size); vars->l_max_values_b = (int **) space(sizeof(int *) * size); vars->k_min_values_b = (int *) space(sizeof(int) * size); vars->k_max_values_b = (int *) space(sizeof(int) * size); vars->Q_M = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_m = (int **) space(sizeof(int *) * size); vars->l_max_values_m = (int **) space(sizeof(int *) * size); vars->k_min_values_m = (int *) space(sizeof(int) * size); vars->k_max_values_m = (int *) space(sizeof(int) * size); vars->Q_M1 = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * size); vars->l_min_values_m1 = (int **) space(sizeof(int *) * size); vars->l_max_values_m1 = (int **) space(sizeof(int *) * size); vars->k_min_values_m1 = (int *) space(sizeof(int) * size); vars->k_max_values_m1 = (int *) space(sizeof(int) * size); if(vars->circ){ vars->Q_M2 = (FLT_OR_DBL ***) space(sizeof(FLT_OR_DBL **) * (length + 1)); vars->l_min_values_m2 = (int **) space(sizeof(int *) * (length + 1)); vars->l_max_values_m2 = (int **) space(sizeof(int *) * (length + 1)); vars->k_min_values_m2 = (int *) space(sizeof(int) * (length + 1)); vars->k_max_values_m2 = (int *) space(sizeof(int) * (length + 1)); } else{ vars->Q_M2 = NULL; vars->l_min_values_m2 = NULL; vars->l_max_values_m2 = NULL; vars->k_min_values_m2 = NULL; vars->k_max_values_m2 = NULL; } vars->Q_c = NULL; vars->Q_cH = NULL; vars->Q_cI = NULL; vars->Q_cM = NULL; /* prepare everything that is known from previous mfe folding step */ for(i = 1; i < length; i++) for(j = i; j <= length; j++){ int mem; ij = vars->my_iindx[i] - j; jij = vars->jindx[j] + i; if(mfe_vars->E_C[ij]){ mem = mfe_vars->k_max_values[ij] - mfe_vars->k_min_values[ij] + 1; vars->k_min_values_b[ij] = mfe_vars->k_min_values[ij]; vars->k_max_values_b[ij] = mfe_vars->k_max_values[ij]; vars->Q_B[ij] = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_b[ij] = (int *)space(sizeof(int) * mem); 
vars->l_max_values_b[ij] = (int *)space(sizeof(int) * mem); vars->Q_B[ij] -= vars->k_min_values_b[ij]; vars->l_min_values_b[ij] -= vars->k_min_values_b[ij]; vars->l_max_values_b[ij] -= vars->k_min_values_b[ij]; for(cnt1 = vars->k_min_values_b[ij]; cnt1 <= vars->k_max_values_b[ij]; cnt1++){ vars->l_min_values_b[ij][cnt1] = mfe_vars->l_min_values[ij][cnt1]; vars->l_max_values_b[ij][cnt1] = mfe_vars->l_max_values[ij][cnt1]; if(vars->l_min_values_b[ij][cnt1] < INF){ vars->Q_B[ij][cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * ((vars->l_max_values_b[ij][cnt1] - vars->l_min_values_b[ij][cnt1] + 1)/2 + 1)); vars->Q_B[ij][cnt1] -= vars->l_min_values_b[ij][cnt1]/2; } } } if(mfe_vars->E_M[ij]){ mem = mfe_vars->k_max_values_m[ij] - mfe_vars->k_min_values_m[ij] + 1; vars->k_min_values_m[ij] = mfe_vars->k_min_values_m[ij]; vars->k_max_values_m[ij] = mfe_vars->k_max_values_m[ij]; vars->Q_M[ij] = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_m[ij] = (int *)space(sizeof(int) * mem); vars->l_max_values_m[ij] = (int *)space(sizeof(int) * mem); vars->Q_M[ij] -= vars->k_min_values_m[ij]; vars->l_min_values_m[ij] -= vars->k_min_values_m[ij]; vars->l_max_values_m[ij] -= vars->k_min_values_m[ij]; for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++){ vars->l_min_values_m[ij][cnt1] = mfe_vars->l_min_values_m[ij][cnt1]; vars->l_max_values_m[ij][cnt1] = mfe_vars->l_max_values_m[ij][cnt1]; if(vars->l_min_values_m[ij][cnt1] < INF){ vars->Q_M[ij][cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * ((vars->l_max_values_m[ij][cnt1] - vars->l_min_values_m[ij][cnt1] + 1)/2 + 1)); vars->Q_M[ij][cnt1] -= vars->l_min_values_m[ij][cnt1]/2; } } } if(mfe_vars->E_M1[ij]){ mem = mfe_vars->k_max_values_m1[ij] - mfe_vars->k_min_values_m1[ij] + 1; vars->k_min_values_m1[jij] = mfe_vars->k_min_values_m1[ij]; vars->k_max_values_m1[jij] = mfe_vars->k_max_values_m1[ij]; vars->Q_M1[jij] = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_m1[jij] = (int *)space(sizeof(int) * mem); vars->l_max_values_m1[jij] = (int *)space(sizeof(int) * mem); vars->Q_M1[jij] -= vars->k_min_values_m1[jij]; vars->l_min_values_m1[jij] -= vars->k_min_values_m1[jij]; vars->l_max_values_m1[jij] -= vars->k_min_values_m1[jij]; for(cnt1 = vars->k_min_values_m1[jij]; cnt1 <= vars->k_max_values_m1[jij]; cnt1++){ vars->l_min_values_m1[jij][cnt1] = mfe_vars->l_min_values_m1[ij][cnt1]; vars->l_max_values_m1[jij][cnt1] = mfe_vars->l_max_values_m1[ij][cnt1]; if(vars->l_min_values_m1[jij][cnt1] < INF){ vars->Q_M1[jij][cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * ((vars->l_max_values_m1[jij][cnt1] - vars->l_min_values_m1[jij][cnt1] + 1)/2 + 1)); vars->Q_M1[jij][cnt1] -= vars->l_min_values_m1[jij][cnt1]/2; } } } } if(vars->circ){ int mem; if(mfe_vars->E_Fc){ mem = mfe_vars->k_max_values_fc - mfe_vars->k_min_values_fc + 1; vars->Q_c = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_qc = (int *)space(sizeof(int) * mem); vars->l_max_values_qc = (int *)space(sizeof(int) * mem); vars->k_min_values_qc = mfe_vars->k_min_values_fc; vars->k_max_values_qc = mfe_vars->k_max_values_fc; vars->Q_c -= vars->k_min_values_qc; vars->l_min_values_qc -= vars->k_min_values_qc; vars->l_max_values_qc -= vars->k_min_values_qc; for(cnt1 = vars->k_min_values_qc; cnt1 <= vars->k_max_values_qc; cnt1++){ mem = (mfe_vars->l_max_values_fc[cnt1] - mfe_vars->l_min_values_fc[cnt1] + 1)/2 + 1; vars->l_min_values_qc[cnt1] = mfe_vars->l_min_values_fc[cnt1]; vars->l_max_values_qc[cnt1] = mfe_vars->l_max_values_fc[cnt1]; 
if(vars->l_min_values_qc[cnt1] < INF){ vars->Q_c[cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); vars->Q_c[cnt1] -= vars->l_min_values_qc[cnt1]/2; } } } if(mfe_vars->E_FcH){ mem = mfe_vars->k_max_values_fcH - mfe_vars->k_min_values_fcH + 1; vars->Q_cH = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_qcH = (int *)space(sizeof(int) * mem); vars->l_max_values_qcH = (int *)space(sizeof(int) * mem); vars->k_min_values_qcH = mfe_vars->k_min_values_fcH; vars->k_max_values_qcH = mfe_vars->k_max_values_fcH; vars->Q_cH -= vars->k_min_values_qcH; vars->l_min_values_qcH -= vars->k_min_values_qcH; vars->l_max_values_qcH -= vars->k_min_values_qcH; for(cnt1 = vars->k_min_values_qcH; cnt1 <= vars->k_max_values_qcH; cnt1++){ mem = (mfe_vars->l_max_values_fcH[cnt1] - mfe_vars->l_min_values_fcH[cnt1] + 1)/2 + 1; vars->l_min_values_qcH[cnt1] = mfe_vars->l_min_values_fcH[cnt1]; vars->l_max_values_qcH[cnt1] = mfe_vars->l_max_values_fcH[cnt1]; if(vars->l_min_values_qcH[cnt1] < INF){ vars->Q_cH[cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); vars->Q_cH[cnt1] -= vars->l_min_values_qcH[cnt1]/2; } } } if(mfe_vars->E_FcI){ mem = mfe_vars->k_max_values_fcI - mfe_vars->k_min_values_fcI + 1; vars->Q_cI = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_qcI = (int *)space(sizeof(int) * mem); vars->l_max_values_qcI = (int *)space(sizeof(int) * mem); vars->k_min_values_qcI = mfe_vars->k_min_values_fcI; vars->k_max_values_qcI = mfe_vars->k_max_values_fcI; vars->Q_cI -= vars->k_min_values_qcI; vars->l_min_values_qcI -= vars->k_min_values_qcI; vars->l_max_values_qcI -= vars->k_min_values_qcI; for(cnt1 = vars->k_min_values_qcI; cnt1 <= vars->k_max_values_qcI; cnt1++){ mem = (mfe_vars->l_max_values_fcI[cnt1] - mfe_vars->l_min_values_fcI[cnt1] + 1)/2 + 1; vars->l_min_values_qcI[cnt1] = mfe_vars->l_min_values_fcI[cnt1]; vars->l_max_values_qcI[cnt1] = mfe_vars->l_max_values_fcI[cnt1]; if(vars->l_min_values_qcI[cnt1] < INF){ vars->Q_cI[cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); vars->Q_cI[cnt1] -= vars->l_min_values_qcI[cnt1]/2; } } } if(mfe_vars->E_FcM){ mem = mfe_vars->k_max_values_fcM - mfe_vars->k_min_values_fcM + 1; vars->Q_cM = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_qcM = (int *)space(sizeof(int) * mem); vars->l_max_values_qcM = (int *)space(sizeof(int) * mem); vars->k_min_values_qcM = mfe_vars->k_min_values_fcM; vars->k_max_values_qcM = mfe_vars->k_max_values_fcM; vars->Q_cM -= vars->k_min_values_qcM; vars->l_min_values_qcM -= vars->k_min_values_qcM; vars->l_max_values_qcM -= vars->k_min_values_qcM; for(cnt1 = vars->k_min_values_qcM; cnt1 <= vars->k_max_values_qcM; cnt1++){ mem = (mfe_vars->l_max_values_fcM[cnt1] - mfe_vars->l_min_values_fcM[cnt1] + 1)/2 + 1; vars->l_min_values_qcM[cnt1] = mfe_vars->l_min_values_fcM[cnt1]; vars->l_max_values_qcM[cnt1] = mfe_vars->l_max_values_fcM[cnt1]; if(vars->l_min_values_qcM[cnt1] < INF){ vars->Q_cM[cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); vars->Q_cM[cnt1] -= vars->l_min_values_qcM[cnt1]/2; } } } if(mfe_vars->E_M2){ vars->Q_M2 = (FLT_OR_DBL ***)space(sizeof(FLT_OR_DBL **) * (length + 1)); vars->k_min_values_m2 = (int *)space(sizeof(int) * (length + 1)); vars->k_max_values_m2 = (int *)space(sizeof(int) * (length + 1)); vars->l_min_values_m2 = (int **)space(sizeof(int *) * (length + 1)); vars->l_max_values_m2 = (int **)space(sizeof(int *) * (length + 1)); for(i=1; i < length - TURN - 1; i++){ if(!mfe_vars->E_M2[i]) continue; mem = mfe_vars->k_max_values_m2[i] - 
mfe_vars->k_min_values_m2[i] + 1; vars->k_min_values_m2[i] = mfe_vars->k_min_values_m2[i]; vars->k_max_values_m2[i] = mfe_vars->k_max_values_m2[i]; vars->Q_M2[i] = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * mem); vars->l_min_values_m2[i] = (int *)space(sizeof(int) * mem); vars->l_max_values_m2[i] = (int *)space(sizeof(int) * mem); vars->Q_M2[i] -= vars->k_min_values_m2[i]; vars->l_min_values_m2[i] -= vars->k_min_values_m2[i]; vars->l_max_values_m2[i] -= vars->k_min_values_m2[i]; for(cnt1 = vars->k_min_values_m2[i]; cnt1 <= vars->k_max_values_m2[i]; cnt1++){ mem = (mfe_vars->l_max_values_m2[i][cnt1] - mfe_vars->l_min_values_m2[i][cnt1] + 1)/2 + 1; vars->l_min_values_m2[i][cnt1] = mfe_vars->l_min_values_m2[i][cnt1]; vars->l_max_values_m2[i][cnt1] = mfe_vars->l_max_values_m2[i][cnt1]; if(vars->l_min_values_m2[i][cnt1] < INF){ vars->Q_M2[i][cnt1] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); vars->Q_M2[i][cnt1] -= vars->l_min_values_m2[i][cnt1]/2; } } } } } else{ vars->Q_M2 = NULL; vars->Q_c = vars->Q_cH = vars->Q_cI = vars->Q_cM = NULL; vars->k_min_values_m2 = vars->k_max_values_m2 = NULL; vars->l_min_values_m2 = vars->l_max_values_m2 = NULL; vars->l_min_values_qc = vars->l_max_values_qc = NULL; vars->l_min_values_qcH = vars->l_max_values_qcH = NULL; vars->l_min_values_qcI = vars->l_max_values_qcI = NULL; vars->l_min_values_qcM = vars->l_max_values_qcM = NULL; } return vars; } PUBLIC void destroy_TwoDpfold_variables(TwoDpfold_vars *vars){ unsigned int i, j, ij; int cnt1; if(vars == NULL) return; #ifdef _OPENMP #pragma omp sections private(i,j,ij) { #pragma omp section { #endif if(vars->Q != NULL){ for(i = 1; i <= vars->seq_length; i++){ for(j = i; j <= vars->seq_length; j++){ ij = vars->my_iindx[i] - j; if(!vars->Q[ij]) continue; for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++) if(vars->l_min_values[ij][cnt1] < INF){ vars->Q[ij][cnt1] += vars->l_min_values[ij][cnt1]/2; free(vars->Q[ij][cnt1]); } if(vars->k_min_values[ij] < INF){ vars->Q[ij] += vars->k_min_values[ij]; free(vars->Q[ij]); vars->l_min_values[ij] += vars->k_min_values[ij]; vars->l_max_values[ij] += vars->k_min_values[ij]; free(vars->l_min_values[ij]); free(vars->l_max_values[ij]); } } } free(vars->Q); free(vars->l_min_values); free(vars->l_max_values); free(vars->k_min_values); free(vars->k_max_values); } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_B != NULL){ for(i = 1; i < vars->seq_length; i++){ for(j = i; j <= vars->seq_length; j++){ ij = vars->my_iindx[i] - j; if(!vars->Q_B[ij]) continue; for(cnt1 = vars->k_min_values_b[ij]; cnt1 <= vars->k_max_values_b[ij]; cnt1++) if(vars->l_min_values_b[ij][cnt1] < INF){ vars->Q_B[ij][cnt1] += vars->l_min_values_b[ij][cnt1]/2; free(vars->Q_B[ij][cnt1]); } if(vars->k_min_values_b[ij] < INF){ vars->Q_B[ij] += vars->k_min_values_b[ij]; free(vars->Q_B[ij]); vars->l_min_values_b[ij] += vars->k_min_values_b[ij]; vars->l_max_values_b[ij] += vars->k_min_values_b[ij]; free(vars->l_min_values_b[ij]); free(vars->l_max_values_b[ij]); } } } free(vars->Q_B); free(vars->l_min_values_b); free(vars->l_max_values_b); free(vars->k_min_values_b); free(vars->k_max_values_b); } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_M != NULL){ for(i = 1; i < vars->seq_length; i++){ for(j = i; j <= vars->seq_length; j++){ ij = vars->my_iindx[i] - j; if(!vars->Q_M[ij]) continue; for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++) if(vars->l_min_values_m[ij][cnt1] < INF){ vars->Q_M[ij][cnt1] += vars->l_min_values_m[ij][cnt1]/2; 
free(vars->Q_M[ij][cnt1]); } if(vars->k_min_values_m[ij] < INF){ vars->Q_M[ij] += vars->k_min_values_m[ij]; free(vars->Q_M[ij]); vars->l_min_values_m[ij] += vars->k_min_values_m[ij]; vars->l_max_values_m[ij] += vars->k_min_values_m[ij]; free(vars->l_min_values_m[ij]); free(vars->l_max_values_m[ij]); } } } free(vars->Q_M); free(vars->l_min_values_m); free(vars->l_max_values_m); free(vars->k_min_values_m); free(vars->k_max_values_m); } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_M1 != NULL){ for(i = 1; i < vars->seq_length; i++){ for(j = i; j <= vars->seq_length; j++){ ij = vars->jindx[j] + i; if(!vars->Q_M1[ij]) continue; for(cnt1 = vars->k_min_values_m1[ij]; cnt1 <= vars->k_max_values_m1[ij]; cnt1++) if(vars->l_min_values_m1[ij][cnt1] < INF){ vars->Q_M1[ij][cnt1] += vars->l_min_values_m1[ij][cnt1]/2; free(vars->Q_M1[ij][cnt1]); } if(vars->k_min_values_m1[ij] < INF){ vars->Q_M1[ij] += vars->k_min_values_m1[ij]; free(vars->Q_M1[ij]); vars->l_min_values_m1[ij] += vars->k_min_values_m1[ij]; vars->l_max_values_m1[ij] += vars->k_min_values_m1[ij]; free(vars->l_min_values_m1[ij]); free(vars->l_max_values_m1[ij]); } } } free(vars->Q_M1); free(vars->l_min_values_m1); free(vars->l_max_values_m1); free(vars->k_min_values_m1); free(vars->k_max_values_m1); } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_M2 != NULL){ for(i = 1; i < vars->seq_length-TURN-1; i++){ if(!vars->Q_M2[i]) continue; for(cnt1 = vars->k_min_values_m2[i]; cnt1 <= vars->k_max_values_m2[i]; cnt1++) if(vars->l_min_values_m2[i][cnt1] < INF){ vars->Q_M2[i][cnt1] += vars->l_min_values_m2[i][cnt1]/2; free(vars->Q_M2[i][cnt1]); } if(vars->k_min_values_m2[i] < INF){ vars->Q_M2[i] += vars->k_min_values_m2[i]; free(vars->Q_M2[i]); vars->l_min_values_m2[i] += vars->k_min_values_m2[i]; vars->l_max_values_m2[i] += vars->k_min_values_m2[i]; free(vars->l_min_values_m2[i]); free(vars->l_max_values_m2[i]); } } free(vars->Q_M2); free(vars->l_min_values_m2); free(vars->l_max_values_m2); free(vars->k_min_values_m2); free(vars->k_max_values_m2); } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_c != NULL){ for(cnt1 = vars->k_min_values_qc; cnt1 <= vars->k_max_values_qc; cnt1++) if(vars->l_min_values_qc[cnt1] < INF){ vars->Q_c[cnt1] += vars->l_min_values_qc[cnt1]/2; free(vars->Q_c[cnt1]); } if(vars->k_min_values_qc < INF){ vars->Q_c += vars->k_min_values_qc; free(vars->Q_c); vars->l_min_values_qc += vars->k_min_values_qc; vars->l_max_values_qc += vars->k_min_values_qc; free(vars->l_min_values_qc); free(vars->l_max_values_qc); } } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_cI != NULL){ for(cnt1 = vars->k_min_values_qcI; cnt1 <= vars->k_max_values_qcI; cnt1++) if(vars->l_min_values_qcI[cnt1] < INF){ vars->Q_cI[cnt1] += vars->l_min_values_qcI[cnt1]/2; free(vars->Q_cI[cnt1]); } if(vars->k_min_values_qcI < INF){ vars->Q_cI += vars->k_min_values_qcI; free(vars->Q_cI); vars->l_min_values_qcI += vars->k_min_values_qcI; vars->l_max_values_qcI += vars->k_min_values_qcI; free(vars->l_min_values_qcI); free(vars->l_max_values_qcI); } } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_cH != NULL){ for(cnt1 = vars->k_min_values_qcH; cnt1 <= vars->k_max_values_qcH; cnt1++) if(vars->l_min_values_qcH[cnt1] < INF){ vars->Q_cH[cnt1] += vars->l_min_values_qcH[cnt1]/2; free(vars->Q_cH[cnt1]); } if(vars->k_min_values_qcH < INF){ vars->Q_cH += vars->k_min_values_qcH; free(vars->Q_cH); vars->l_min_values_qcH += vars->k_min_values_qcH; vars->l_max_values_qcH += vars->k_min_values_qcH; free(vars->l_min_values_qcH); 
free(vars->l_max_values_qcH); } } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_cM != NULL){ for(cnt1 = vars->k_min_values_qcM; cnt1 <= vars->k_max_values_qcM; cnt1++) if(vars->l_min_values_qcM[cnt1] < INF){ vars->Q_cM[cnt1] += vars->l_min_values_qcM[cnt1]/2; free(vars->Q_cM[cnt1]); } if(vars->k_min_values_qcM < INF){ vars->Q_cM += vars->k_min_values_qcM; free(vars->Q_cM); vars->l_min_values_qcM += vars->k_min_values_qcM; vars->l_max_values_qcM += vars->k_min_values_qcM; free(vars->l_min_values_qcM); free(vars->l_max_values_qcM); } } #ifdef _OPENMP } #pragma omp section { #endif if(vars->Q_rem) free(vars->Q_rem); if(vars->Q_B_rem) free(vars->Q_B_rem); if(vars->Q_M_rem) free(vars->Q_M_rem); if(vars->Q_M1_rem) free(vars->Q_M1_rem); if(vars->Q_M2_rem) free(vars->Q_M2_rem); if(vars->sequence != NULL) free(vars->sequence); if(vars->reference_pt1 != NULL) free(vars->reference_pt1); if(vars->reference_pt2 != NULL) free(vars->reference_pt2); if(vars->referenceBPs1 != NULL) free(vars->referenceBPs1); if(vars->referenceBPs2 != NULL) free(vars->referenceBPs2); if(vars->ptype != NULL) free(vars->ptype); if(vars->scale != NULL) free(vars->scale); if(vars->S != NULL) free(vars->S); if(vars->pf_params != NULL) free(vars->pf_params); if(vars->S1 != NULL) free(vars->S1); if(vars->mm1 != NULL) free(vars->mm1); if(vars->mm2 != NULL) free(vars->mm2); if(vars->bpdist != NULL) free(vars->bpdist); #ifdef _OPENMP } } #endif if(vars->my_iindx != NULL) free(vars->my_iindx); if(vars->jindx != NULL) free(vars->jindx); free(vars); } PRIVATE void initialize_TwoDpfold_vars(TwoDpfold_vars *vars){ scale_pf_params2(vars); make_pair_matrix(); } TwoDpfold_solution *TwoDpfoldList(TwoDpfold_vars *vars, int distance1, int distance2){ unsigned int maxD1 = 0, maxD2 = 0, counter = 0; int cnt1, cnt2, k_min, k_max, l_min, l_max, ndx; FLT_OR_DBL q = 0.; TwoDpfold_solution *output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); maxD1 = vars->maxD1; maxD2 = vars->maxD2; if(distance1 >= 0){ if((unsigned int)distance1 > maxD1) fprintf(stderr, "TwoDpfoldList@2Dpfold.c: limiting maximum basepair distance 1 to %u\n", maxD1); else maxD1 = (unsigned int)distance1; } if(distance2 >= 0){ if((unsigned int)distance2 > maxD2) fprintf(stderr, "TwoDpfoldList@2Dpfold.c: limiting maximum basepair distance 2 to %u\n", maxD2); else maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (TwoDpfold_solution *)space((((maxD1+1)*(maxD2+2))/2 + 2) * sizeof(TwoDpfold_solution)); pf2D_linear(vars); if(vars->circ) pf2D_circ(vars); ndx = vars->my_iindx[1] - vars->seq_length; k_min = (vars->circ) ? vars->k_min_values_qc: vars->k_min_values[ndx]; k_max = (vars->circ) ? vars->k_max_values_qc: vars->k_max_values[ndx]; for(cnt1 = k_min; cnt1 <= k_max; cnt1++){ l_min = (vars->circ) ? vars->l_min_values_qc[cnt1] : vars->l_min_values[ndx][cnt1]; l_max = (vars->circ) ? vars->l_max_values_qc[cnt1] : vars->l_max_values[ndx][cnt1]; for(cnt2 = l_min; cnt2 <= l_max; cnt2 += 2){ q = (vars->circ) ? vars->Q_c[cnt1][cnt2/2] : vars->Q[ndx][cnt1][cnt2/2]; if(q == 0.) continue; output[counter].k = cnt1; output[counter].l = cnt2; output[counter].q = q; counter++; } } /* store entry for remaining partition if it exists */ q = (vars->circ) ? 
vars->Q_c_rem : vars->Q_rem[ndx]; if(q != 0.){ output[counter].k = -1; output[counter].l = -1; output[counter].q = q; counter++; } /* insert end-marker entry */ output[counter].k = output[counter].l = INF; counter++; /* resize to actual dataset amount */ output = (TwoDpfold_solution*)xrealloc(output, sizeof(TwoDpfold_solution) * counter); return output; } PUBLIC FLT_OR_DBL **TwoDpfold(TwoDpfold_vars *vars, int distance1, int distance2){ unsigned int i; unsigned int maxD1 = 0; unsigned int maxD2 = 0; unsigned int mm; int cnt1, cnt2; FLT_OR_DBL **output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); for(i=1; i<=(unsigned int)vars->reference_pt1[0]; i++) if(i < (unsigned int)vars->reference_pt1[i]) maxD1++; for(i=1; i<=(unsigned int)vars->reference_pt2[0]; i++) if(i < (unsigned int)vars->reference_pt2[i]) maxD2++; mm = maximumMatching(vars->sequence); maxD1 += mm; maxD2 += mm; if(distance1 >= 0){ if((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); maxD1 = (unsigned int)distance1; } if(distance2 >= 0){ if((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL*) * (maxD1+1)); pf2D_linear(vars); int ndx = vars->my_iindx[1] - vars->seq_length; for(cnt1 = vars->k_min_values[ndx]; cnt1 <= MIN2(vars->k_max_values[ndx], vars->maxD1); cnt1++){ output[cnt1] = (FLT_OR_DBL *)space((vars->maxD2+1)*sizeof(FLT_OR_DBL)); for(cnt2 = vars->l_min_values[ndx][cnt1]; cnt2 <= MIN2(vars->l_max_values[ndx][cnt1], vars->maxD2); cnt2+=2){ output[cnt1][cnt2] = vars->Q[ndx][cnt1][cnt2/2]; } } return output; } PUBLIC FLT_OR_DBL **TwoDpfold_circ(TwoDpfold_vars *vars, int distance1, int distance2){ unsigned int i; unsigned int maxD1 = 0; unsigned int maxD2 = 0; unsigned int mm; int cnt1, cnt2; FLT_OR_DBL **output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); for(i=1; i<=(unsigned int)vars->reference_pt1[0]; i++) if(i < (unsigned int)vars->reference_pt1[i]) maxD1++; for(i=1; i<=(unsigned int)vars->reference_pt2[0]; i++) if(i < (unsigned int)vars->reference_pt2[i]) maxD2++; mm = maximumMatching(vars->sequence); maxD1 += mm; maxD2 += mm; if(distance1 >= 0){ if((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); maxD1 = (unsigned int)distance1; } if(distance2 >= 0){ if((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL*) * (maxD1+1)); pf2D_linear(vars); pf2D_circ(vars); for(cnt1 = vars->k_min_values_qc; cnt1 <= MIN2(vars->k_max_values_qc, vars->maxD1); cnt1++){ output[cnt1] = (FLT_OR_DBL *)space((vars->maxD2+1)*sizeof(FLT_OR_DBL)); for(cnt2 = vars->l_min_values_qc[cnt1]; cnt2 <= MIN2(vars->l_max_values_qc[cnt1], vars->maxD2); cnt2+=2){ output[cnt1][cnt2] = vars->Q_c[cnt1][cnt2/2]; } } return output; } PRIVATE void pf2D_linear(TwoDpfold_vars *vars){ char *sequence, *ptype; short *S1, *reference_pt1, *reference_pt2; unsigned int *referenceBPs1, *referenceBPs2, d, i, j, ij, seq_length, maxD1, maxD2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, circ, cnt1, cnt2, cnt3, cnt4; 
double max_real; FLT_OR_DBL *scale, Qmax; pf_paramT *pf_params; max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX; pf_params = vars->pf_params; sequence = vars->sequence; seq_length = vars->seq_length; maxD1 = vars->maxD1; maxD2 = vars->maxD2; S1 = vars->S1; ptype = vars->ptype; scale = vars->scale; reference_pt1 = vars->reference_pt1; reference_pt2 = vars->reference_pt2; my_iindx = vars->my_iindx; jindx = vars->jindx; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; dangles = vars->dangles; circ = vars->circ; mm1 = vars->mm1; mm2 = vars->mm2; bpdist = vars->bpdist; /*array initialization ; qb,qm,q qb,qm,q (i,j) are stored as ((n+1-i)*(n-i) div 2 + n+1-j */ for (j = 1; j<=seq_length; j++) for (i=(j>TURN?(j-TURN):1); i<=j; i++){ ij = my_iindx[i]-j; vars->k_min_values[ij] = 0; vars->k_max_values[ij] = 0; vars->l_min_values[ij] = (int *)space(sizeof(int)); vars->l_max_values[ij] = (int *)space(sizeof(int)); vars->l_min_values[ij][0] = 0; vars->l_max_values[ij][0] = 0; vars->Q[ij] = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL *)); vars->Q[ij][0] = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)); vars->Q[ij][0][0] = 1.0 * scale[j-i+1]; } for (d = TURN+2; d <= seq_length; d++) { /* i,j in [1..seq_length] */ #ifdef _OPENMP #pragma omp parallel for private(i, j, ij, cnt1, cnt2, cnt3, cnt4) #endif for (j = d; j <= seq_length; j++) { unsigned int k,l, kl, u, ii, dij; int no_close, type, type_2, tt, da, db, base_da, base_db; FLT_OR_DBL temp2, aux_en; i = j-d+1; ij = my_iindx[i]-j; dij = j - i - 1; type = ptype[ij]; no_close = (((type==3)||(type==4))&&no_closingGU); if (type) { /* we have a pair */ int k_min_b, k_max_b, l_min_b, l_max_b; int k_min_post_b, k_max_post_b, *l_min_post_b, *l_max_post_b; int update_b = 0; if(!vars->Q_B[ij]){ update_b = 1; k_min_b = l_min_b = 0; k_max_b = mm1[ij] + referenceBPs1[ij]; l_max_b = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_b, k_max_b, l_min_b, l_max_b, bpdist[ij], &vars->k_min_values_b[ij], &vars->k_max_values_b[ij], &vars->l_min_values_b[ij], &vars->l_max_values_b[ij] ); preparePosteriorBoundaries( vars->k_max_values_b[ij] - vars->k_min_values_b[ij] + 1, vars->k_min_values_b[ij], &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); prepareArray( &vars->Q_B[ij], vars->k_min_values_b[ij], vars->k_max_values_b[ij], vars->l_min_values_b[ij], vars->l_max_values_b[ij] ); } /* hairpin ----------------------------------------------*/ /* get distance to reference if closing the hairpin * d1a = dbp(T1_{i,j}, {i,j}) */ base_da = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_db = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_da + referenceBPs1[ij]; db = base_db + referenceBPs2[ij]; if(!no_close) if((da >= 0) && (db >= 0)){ if(((unsigned int)da<=maxD1) && ((unsigned int)db <= maxD2)){ vars->Q_B[ij][da][db/2] = exp_E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, pf_params) * scale[dij+2]; if(update_b){ updatePosteriorBoundaries( da, db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else{ vars->Q_B_rem[ij] = exp_E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, pf_params) * scale[dij+2]; } } /*-------------------------------------------------------- check for elementary structures involving more than one closing pair. 
--------------------------------------------------------*/ for (k = i+1; k <= MIN2(j-2-TURN,i+MAXLOOP+1) ; k++) { unsigned int minl, ln_pre; minl = k + TURN + 1; ln_pre = dij + k; if(ln_pre > minl + MAXLOOP) minl = ln_pre - MAXLOOP - 1; for (l = minl; l < j; l++) { kl = my_iindx[k] - l; type_2 = ptype[kl]; if (type_2==0) continue; type_2 = rtype[type_2]; aux_en = exp_E_IntLoop(k-i-1, j-l-1, type, type_2, S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params) * scale[k-i+j-l]; /* get distance to reference if closing the interior loop * d2 = dbp(S_{i,j}, S_{k,l} + {i,j}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[kl]; db = base_db + referenceBPs2[ij] - referenceBPs2[kl]; if(vars->Q_B_rem[kl]){ vars->Q_B_rem[ij] += vars->Q_B_rem[kl] * aux_en; } if(!vars->Q_B[kl]) continue; for(cnt1 = vars->k_min_values_b[kl]; cnt1 <= vars->k_max_values_b[kl]; cnt1++) for(cnt2 = vars->l_min_values_b[kl][cnt1]; cnt2 <= vars->l_max_values_b[kl][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q_B[ij][cnt1 + da][(cnt2 + db)/2] += vars->Q_B[kl][cnt1][cnt2/2] * aux_en; if(update_b){ updatePosteriorBoundaries( da + cnt1, db + cnt2, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else{ vars->Q_B_rem[ij] += vars->Q_B[kl][cnt1][cnt2/2] * aux_en; } } } /* end l-loop */ } /* end k-loop */ /* multi-loop contribution ------------------------*/ if(!no_close){ for(u=i+TURN+2; u<j-TURN-2;u++){ tt = rtype[type]; temp2 = pf_params->expMLclosing * exp_E_MLstem(tt, S1[j-1], S1[i+1], pf_params) * scale[2]; if(vars->Q_M_rem[my_iindx[i+1]-u]){ if(vars->Q_M1[jindx[j-1]+u+1]) for(cnt1 = vars->k_min_values_m1[jindx[j-1]+u+1]; cnt1 <= vars->k_max_values_m1[jindx[j-1]+u+1]; cnt1++) for(cnt2 = vars->l_min_values_m1[jindx[j-1]+u+1][cnt1]; cnt2 <= vars->l_max_values_m1[jindx[j-1]+u+1][cnt1]; cnt2 += 2) vars->Q_B_rem[ij] += vars->Q_M_rem[my_iindx[i+1]-u] * vars->Q_M1[jindx[j-1]+u+1][cnt1][cnt2/2] * temp2; if(vars->Q_M1_rem[jindx[j-1]+u+1]) vars->Q_B_rem[ij] += vars->Q_M_rem[my_iindx[i+1]-u] * vars->Q_M1_rem[jindx[j-1]+u+1] * temp2; } if(vars->Q_M1_rem[jindx[j-1]+u+1]){ if(vars->Q_M[my_iindx[i+1]-u]) for(cnt1 = vars->k_min_values_m[my_iindx[i+1]-u]; cnt1 <= vars->k_max_values_m[my_iindx[i+1]-u]; cnt1++) for(cnt2 = vars->l_min_values_m[my_iindx[i+1]-u][cnt1]; cnt2 <= vars->l_max_values_m[my_iindx[i+1]-u][cnt1]; cnt2 += 2) vars->Q_B_rem[ij] += vars->Q_M[my_iindx[i+1]-u][cnt1][cnt2/2] * vars->Q_M1_rem[jindx[j-1]+u+1] * temp2; } /* get distance to reference if closing the multiloop * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[my_iindx[i+1]-u] - referenceBPs1[my_iindx[u+1]-j+1]; db = base_db + referenceBPs2[ij] - referenceBPs2[my_iindx[i+1]-u] - referenceBPs2[my_iindx[u+1]-j+1]; if(!vars->Q_M[my_iindx[i+1]-u]) continue; if(!vars->Q_M1[jindx[j-1]+u+1]) continue; for(cnt1 = vars->k_min_values_m[my_iindx[i+1]-u]; cnt1 <= vars->k_max_values_m[my_iindx[i+1]-u]; cnt1++) for(cnt2 = vars->l_min_values_m[my_iindx[i+1]-u][cnt1]; cnt2 <= vars->l_max_values_m[my_iindx[i+1]-u][cnt1]; cnt2 += 2){ for(cnt3 = vars->k_min_values_m1[jindx[j-1]+u+1]; cnt3 <= vars->k_max_values_m1[jindx[j-1]+u+1]; cnt3++) for(cnt4 = vars->l_min_values_m1[jindx[j-1]+u+1][cnt3]; cnt4 <= vars->l_max_values_m1[jindx[j-1]+u+1][cnt3]; cnt4 += 2){ if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){ vars->Q_B[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += vars->Q_M[my_iindx[i+1]-u][cnt1][cnt2/2] * vars->Q_M1[jindx[j-1]+u+1][cnt3][cnt4/2] * temp2; if(update_b){ 
updatePosteriorBoundaries( cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else{ vars->Q_B_rem[ij] += vars->Q_M[my_iindx[i+1]-u][cnt1][cnt2/2] * vars->Q_M1[jindx[j-1]+u+1][cnt3][cnt4/2] * temp2; } } } } } if(update_b){ adjustArrayBoundaries(&vars->Q_B[ij], &vars->k_min_values_b[ij], &vars->k_max_values_b[ij], &vars->l_min_values_b[ij], &vars->l_max_values_b[ij], k_min_post_b, k_max_post_b, l_min_post_b, l_max_post_b ); } } /* end >> if (pair) << */ /* free ends ? -----------------------------------------*/ int k_min_m, k_max_m, l_min_m, l_max_m; int k_min_post_m, k_max_post_m, *l_min_post_m, *l_max_post_m; int update_m = 0; int k_min_m1, k_max_m1, l_min_m1, l_max_m1; int k_min_post_m1, k_max_post_m1, *l_min_post_m1, *l_max_post_m1; int update_m1 = 0; if(!vars->Q_M[ij]){ update_m = 1; k_min_m = l_min_m = 0; k_max_m = mm1[ij] + referenceBPs1[ij]; l_max_m = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_m, k_max_m, l_min_m, l_max_m, bpdist[ij], &vars->k_min_values_m[ij], &vars->k_max_values_m[ij], &vars->l_min_values_m[ij], &vars->l_max_values_m[ij] ); preparePosteriorBoundaries( vars->k_max_values_m[ij] - vars->k_min_values_m[ij] + 1, vars->k_min_values_m[ij], &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); prepareArray( &vars->Q_M[ij], vars->k_min_values_m[ij], vars->k_max_values_m[ij], vars->l_min_values_m[ij], vars->l_max_values_m[ij] ); } if(!vars->Q_M1[jindx[j]+i]){ update_m1 = 1; k_min_m1 = l_min_m1 = 0; k_max_m1 = mm1[ij] + referenceBPs1[ij]; l_max_m1 = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_m1, k_max_m1, l_min_m1, l_max_m1, bpdist[ij], &vars->k_min_values_m1[jindx[j]+i], &vars->k_max_values_m1[jindx[j]+i], &vars->l_min_values_m1[jindx[j]+i], &vars->l_max_values_m1[jindx[j]+i] ); preparePosteriorBoundaries( vars->k_max_values_m1[jindx[j]+i] - vars->k_min_values_m1[jindx[j]+i] + 1, vars->k_min_values_m1[jindx[j]+i], &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); prepareArray( &vars->Q_M1[jindx[j]+i], vars->k_min_values_m1[jindx[j]+i], vars->k_max_values_m1[jindx[j]+i], vars->l_min_values_m1[jindx[j]+i], vars->l_max_values_m1[jindx[j]+i] ); } /* j is unpaired */ da = referenceBPs1[ij] - referenceBPs1[ij+1]; db = referenceBPs2[ij] - referenceBPs2[ij+1]; if(vars->Q_M_rem[ij+1]) vars->Q_M_rem[ij] += vars->Q_M_rem[ij+1] * pf_params->expMLbase * scale[1]; if(vars->Q_M[ij+1]) for(cnt1 = vars->k_min_values_m[ij+1]; cnt1 <= vars->k_max_values_m[ij+1]; cnt1++){ for(cnt2 = vars->l_min_values_m[ij+1][cnt1]; cnt2 <= vars->l_max_values_m[ij+1][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q_M[ij][cnt1 + da][(cnt2 + db)/2] += vars->Q_M[ij+1][cnt1][cnt2/2] * pf_params->expMLbase * scale[1]; if(update_m){ updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else{ vars->Q_M_rem[ij] += vars->Q_M[ij+1][cnt1][cnt2/2] * pf_params->expMLbase * scale[1]; } } } if(vars->Q_M1_rem[jindx[j-1]+i]) vars->Q_M1_rem[jindx[j]+i] += vars->Q_M1_rem[jindx[j-1]+i] * pf_params->expMLbase * scale[1]; if(vars->Q_M1[jindx[j-1]+i]) for(cnt1 = vars->k_min_values_m1[jindx[j-1]+i]; cnt1 <= vars->k_max_values_m1[jindx[j-1]+i]; cnt1++) for(cnt2 = vars->l_min_values_m1[jindx[j-1]+i][cnt1]; cnt2 <= vars->l_max_values_m1[jindx[j-1]+i][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q_M1[jindx[j]+i][cnt1 + da][(cnt2 + db)/2] += vars->Q_M1[jindx[j-1]+i][cnt1][cnt2/2] * pf_params->expMLbase * scale[1]; 
if(update_m1){ updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } else{ vars->Q_M1_rem[jindx[j]+i] += vars->Q_M1[jindx[j-1]+i][cnt1][cnt2/2] * pf_params->expMLbase * scale[1]; } } /* j pairs with i */ if((!no_close) && type){ FLT_OR_DBL aux_en = exp_E_MLstem(type, (i>1) || circ ? S1[i-1] : -1, (j<seq_length) || circ ? S1[j+1] : -1, pf_params); if(vars->Q_B_rem[ij]){ vars->Q_M_rem[ij] += vars->Q_B_rem[ij] * aux_en; vars->Q_M1_rem[jindx[j]+i] += vars->Q_B_rem[ij] * aux_en; } if(vars->Q_B[ij]){ for(cnt1 = vars->k_min_values_b[ij]; cnt1 <= vars->k_max_values_b[ij]; cnt1++) for(cnt2 = vars->l_min_values_b[ij][cnt1]; cnt2 <= vars->l_max_values_b[ij][cnt1]; cnt2 += 2){ vars->Q_M[ij][cnt1][cnt2/2] += vars->Q_B[ij][cnt1][cnt2/2] * aux_en; if(update_m){ updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } vars->Q_M1[jindx[j]+i][cnt1][cnt2/2] += vars->Q_B[ij][cnt1][cnt2/2] * aux_en; if(update_m1){ updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } } } /* j pairs with k: i<k<j */ ii = my_iindx[i]; for (k=i+1; k<=j; k++){ tt = ptype[my_iindx[k]-j]; temp2 = exp_E_MLstem(tt, S1[k-1], (j<seq_length) || circ ? S1[j+1] : -1, pf_params); if(vars->Q_B_rem[my_iindx[k]-j]){ vars->Q_M_rem[ij] += vars->Q_B_rem[my_iindx[k]-j] * pow(pf_params->expMLbase, (double)(k-i)) * scale[k-i] * temp2; if(vars->Q_M[ii-k+1]) for(cnt1 = vars->k_min_values_m[ii-k+1]; cnt1 <= vars->k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = vars->l_min_values_m[ii-k+1][cnt1]; cnt2 <= vars->l_max_values_m[ii-k+1][cnt1]; cnt2 += 2) vars->Q_M_rem[ij] += vars->Q_M[ii-k+1][cnt1][cnt2/2] * vars->Q_B_rem[my_iindx[k]-j] * temp2; if(vars->Q_M_rem[ii-k+1]) vars->Q_M_rem[ij] += vars->Q_M_rem[ii-k+1] * vars->Q_B_rem[my_iindx[k]-j] * temp2; } if(vars->Q_M_rem[ii-k+1]){ if(vars->Q_B[my_iindx[k]-j]) for(cnt1 = vars->k_min_values_b[my_iindx[k]-j]; cnt1 <= vars->k_max_values_b[my_iindx[k]-j]; cnt1++) for(cnt2 = vars->l_min_values_b[my_iindx[k]-j][cnt1]; cnt2 <= vars->l_max_values_b[my_iindx[k]-j][cnt1]; cnt2 += 2) vars->Q_M_rem[ij] += vars->Q_M_rem[my_iindx[k]-j] * vars->Q_B[my_iindx[k]-j][cnt1][cnt2/2] * temp2; } /* add contributions of QM(i,k-1)*QB(k,j)*e^b and * e^((k-i) * c) * QB(k,j) * e^b * therefor we need d1a = dbp(T1_{i,j}, T1_{i,k-1} + T1_{k,j}), * d1b = dbp(T2_{i,j}, T2_{i,k-1} + T2_{k,j}) * d1c = dbp(T1_{i,j}, T1_{k,j})circ = 0; * d1d = dbp(T2_{i,j}, T2_{k,j}) */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k]-j]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k]-j]; if(!vars->Q_B[my_iindx[k]-j]) continue; for(cnt1 = vars->k_min_values_b[my_iindx[k]-j]; cnt1 <= vars->k_max_values_b[my_iindx[k]-j]; cnt1++) for(cnt2 = vars->l_min_values_b[my_iindx[k]-j][cnt1]; cnt2 <= vars->l_max_values_b[my_iindx[k]-j][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q_M[ij][cnt1 + da][(cnt2 + db)/2] += vars->Q_B[my_iindx[k]-j][cnt1][cnt2/2] * pow(pf_params->expMLbase, (double)(k-i)) * scale[k-i] * temp2; if(update_m){ updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else{ vars->Q_M_rem[ij] += vars->Q_B[my_iindx[k]-j][cnt1][cnt2/2] * pow(pf_params->expMLbase, (double)(k-i)) * scale[k-i] * temp2; } } if(!vars->Q_M[ii-k+1]) continue; da -= referenceBPs1[ii-k+1]; db -= referenceBPs2[ii-k+1]; for(cnt1 = vars->k_min_values_m[ii-k+1]; cnt1 <= vars->k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = 
vars->l_min_values_m[ii-k+1][cnt1]; cnt2 <= vars->l_max_values_m[ii-k+1][cnt1]; cnt2 += 2) for(cnt3 = vars->k_min_values_b[my_iindx[k]-j]; cnt3 <= vars->k_max_values_b[my_iindx[k]-j]; cnt3++) for(cnt4 = vars->l_min_values_b[my_iindx[k]-j][cnt3]; cnt4 <= vars->l_max_values_b[my_iindx[k]-j][cnt3]; cnt4 += 2){ if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){ vars->Q_M[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += vars->Q_M[ii-k+1][cnt1][cnt2/2] * vars->Q_B[my_iindx[k]-j][cnt3][cnt4/2] * temp2; if(update_m){ updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else{ vars->Q_M_rem[ij] += vars->Q_M[ii-k+1][cnt1][cnt2/2] * vars->Q_B[my_iindx[k]-j][cnt3][cnt4/2] * temp2; } } } if(update_m){ adjustArrayBoundaries(&vars->Q_M[ij], &vars->k_min_values_m[ij], &vars->k_max_values_m[ij], &vars->l_min_values_m[ij], &vars->l_max_values_m[ij], k_min_post_m, k_max_post_m, l_min_post_m, l_max_post_m ); } if(update_m1){ adjustArrayBoundaries(&vars->Q_M1[jindx[j]+i], &vars->k_min_values_m1[jindx[j]+i], &vars->k_max_values_m1[jindx[j]+i], &vars->l_min_values_m1[jindx[j]+i], &vars->l_max_values_m1[jindx[j]+i], k_min_post_m1, k_max_post_m1, l_min_post_m1, l_max_post_m1 ); } /* compute contributions for Q(i,j) */ int k_min, k_max, l_min, l_max; int k_min_post, k_max_post, *l_min_post, *l_max_post; int update_q = 0; if(!vars->Q[ij]){ update_q = 1; k_min = l_min = 0; k_max = mm1[ij] + referenceBPs1[ij]; l_max = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min, k_max, l_min, l_max, bpdist[ij], &vars->k_min_values[ij], &vars->k_max_values[ij], &vars->l_min_values[ij], &vars->l_max_values[ij] ); preparePosteriorBoundaries( vars->k_max_values[ij] - vars->k_min_values[ij] + 1, vars->k_min_values[ij], &k_min_post, &k_max_post, &l_min_post, &l_max_post ); prepareArray( &vars->Q[ij], vars->k_min_values[ij], vars->k_max_values[ij], vars->l_min_values[ij], vars->l_max_values[ij] ); } if (type){ aux_en = exp_E_ExtLoop(type, (i>1) || circ ? S1[i-1] : -1, (j < seq_length) || circ ? S1[j+1] : -1, pf_params); if(vars->Q_B_rem[ij]) vars->Q_rem[ij] += vars->Q_B_rem[ij] * aux_en; if(vars->Q_B[ij]) for(cnt1 = vars->k_min_values_b[ij]; cnt1 <= vars->k_max_values_b[ij]; cnt1++) for(cnt2 = vars->l_min_values_b[ij][cnt1]; cnt2 <= vars->l_max_values_b[ij][cnt1]; cnt2 += 2){ vars->Q[ij][cnt1][cnt2/2] += vars->Q_B[ij][cnt1][cnt2/2] * aux_en; if(update_q){ updatePosteriorBoundaries(cnt1, cnt2, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } } /* j is unpaired */ if(vars->Q_rem[ij+1]) vars->Q_rem[ij] += vars->Q_rem[ij+1] * scale[1]; /* da = dbp(T1_{i,j}, T1_{i,j-1}) * db = dbp(T2_{i,j}, T2_{i,j-1}) */ da = referenceBPs1[ij] - referenceBPs1[ij+1]; db = referenceBPs2[ij] - referenceBPs2[ij+1]; if(vars->Q[ij+1]) for(cnt1 = vars->k_min_values[ij+1]; cnt1 <= vars->k_max_values[ij+1]; cnt1++) for(cnt2 = vars->l_min_values[ij+1][cnt1]; cnt2 <= vars->l_max_values[ij+1][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q[ij][cnt1 + da][(cnt2 + db)/2] += vars->Q[ij+1][cnt1][cnt2/2] * scale[1]; if(update_q){ updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else{ vars->Q_rem[ij] += vars->Q[ij+1][cnt1][cnt2/2] * scale[1]; } } for(k=j-TURN-1; k>i; k--){ tt = ptype[my_iindx[k]-j]; temp2 = exp_E_ExtLoop(tt, S1[k-1], (j<seq_length) || circ ? 
S1[j+1] : -1, pf_params); if(vars->Q_rem[my_iindx[i]-k+1]){ if(vars->Q_B[my_iindx[k]-j]) for(cnt1 = vars->k_min_values_b[my_iindx[k]-j]; cnt1 <= vars->k_max_values_b[my_iindx[k]-j]; cnt1++) for(cnt2 = vars->l_min_values_b[my_iindx[k]-j][cnt1]; cnt2 <= vars->l_max_values_b[my_iindx[k]-j][cnt1]; cnt2 += 2) vars->Q_rem[ij] += vars->Q_rem[my_iindx[i]-k+1] * vars->Q_B[my_iindx[k]-j][cnt1][cnt2/2] * temp2; if(vars->Q_B_rem[my_iindx[k]-j]) vars->Q_rem[ij] += vars->Q_rem[my_iindx[i]-k+1] * vars->Q_B_rem[my_iindx[k]-j] * temp2; } if(vars->Q_B_rem[my_iindx[k]-j]){ if(vars->Q[my_iindx[i]-k+1]) for(cnt1 = vars->k_min_values[my_iindx[i]-k+1]; cnt1 <= vars->k_max_values[my_iindx[i]-k+1]; cnt1++) for(cnt2 = vars->l_min_values[my_iindx[i]-k+1][cnt1]; cnt2 <= vars->l_max_values[my_iindx[i]-k+1][cnt1]; cnt2 += 2) vars->Q_rem[ij] += vars->Q[my_iindx[i]-k+1][cnt1][cnt2/2] * vars->Q_B_rem[my_iindx[k]-j] * temp2; } /* da = dbp{T1_{i,j}, T1_{k,j} * db = dbp{T2_{i,j}, T2_{k,j}} */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j] - referenceBPs1[my_iindx[i]-k+1]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j] - referenceBPs2[my_iindx[i]-k+1]; if(!vars->Q[my_iindx[i]-k+1]) continue; if(!vars->Q_B[my_iindx[k]-j]) continue; for(cnt1 = vars->k_min_values[my_iindx[i]-k+1]; cnt1 <= vars->k_max_values[my_iindx[i]-k+1]; cnt1++) for(cnt2 = vars->l_min_values[my_iindx[i]-k+1][cnt1]; cnt2 <= vars->l_max_values[my_iindx[i]-k+1][cnt1]; cnt2 += 2) for(cnt3 = vars->k_min_values_b[my_iindx[k]-j]; cnt3 <= vars->k_max_values_b[my_iindx[k]-j]; cnt3++) for(cnt4 = vars->l_min_values_b[my_iindx[k]-j][cnt3]; cnt4 <= vars->l_max_values_b[my_iindx[k]-j][cnt3]; cnt4 += 2){ if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){ vars->Q[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += vars->Q[my_iindx[i]-k+1][cnt1][cnt2/2] * vars->Q_B[my_iindx[k]-j][cnt3][cnt4/2] * temp2; if(update_q){ updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else{ vars->Q_rem[ij] += vars->Q[my_iindx[i]-k+1][cnt1][cnt2/2] * vars->Q_B[my_iindx[k]-j][cnt3][cnt4/2] * temp2; } } } if(update_q){ adjustArrayBoundaries(&vars->Q[ij], &vars->k_min_values[ij], &vars->k_max_values[ij], &vars->l_min_values[ij], &vars->l_max_values[ij], k_min_post, k_max_post, l_min_post, l_max_post ); } #if 1 for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++){ for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2 += 2){ if(vars->Q[ij][cnt1][cnt2/2] > Qmax) { Qmax = vars->Q[ij][cnt1][cnt2/2]; if (Qmax > max_real/10.) fprintf(stderr, "Q close to overflow: %u %u %g\n", i,j,vars->Q[ij][cnt1][cnt2/2]); } if(vars->Q[ij][cnt1][cnt2/2] >= max_real) { PRIVATE char msg[128]; sprintf(msg, "overflow in pf_fold while calculating q[%u,%u]\n" "use larger pf_scale", i,j); nrerror(msg); } } } #endif } /* end of j-loop */ } } /* calculate partition function for circular case */ /* NOTE: this is the postprocessing step ONLY */ /* You have to call pf2D_linear first to calculate */ /* complete circular case!!! 
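   (A hypothetical call-order sketch, assuming the internal driver runs the
   two stages in sequence:
       pf2D_linear(vars);   - fills Q, Q_B, Q_M, Q_M1 and their *_rem classes
       pf2D_circ(vars);     - postprocesses them into Q_c, Q_cH, Q_cI, Q_cM
   which together yield the circular-case partition function.)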
*/ PRIVATE void pf2D_circ(TwoDpfold_vars *vars){ unsigned int d, p, q, pq, k, l, kl, u, da, db, seq_length, maxD1, maxD2, base_d1, base_d2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, type, cnt1, cnt2, cnt3, cnt4; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *sequence, *ptype; FLT_OR_DBL *scale; pf_paramT *pf_params; /* holds all [unscaled] pf parameters */ pf_params = vars->pf_params; sequence = vars->sequence; seq_length = vars->seq_length; maxD1 = vars->maxD1; maxD2 = vars->maxD2; S1 = vars->S1; ptype = vars->ptype; scale = vars->scale; my_iindx = vars->my_iindx; jindx = vars->jindx; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; dangles = vars->dangles; mm1 = vars->mm1; mm2 = vars->mm2; bpdist = vars->bpdist; FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1; FLT_OR_DBL *Q_B_rem, *Q_M_rem, *Q_M1_rem; int **l_min_values_b, **l_max_values_b, **l_min_values_m, **l_max_values_m, **l_min_values_m1, **l_max_values_m1; int *k_min_values_b, *k_max_values_b,*k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1; Q_B = vars->Q_B; l_min_values_b = vars->l_min_values_b; l_max_values_b = vars->l_max_values_b; k_min_values_b = vars->k_min_values_b; k_max_values_b = vars->k_max_values_b; Q_M = vars->Q_M; l_min_values_m = vars->l_min_values_m; l_max_values_m = vars->l_max_values_m; k_min_values_m = vars->k_min_values_m; k_max_values_m = vars->k_max_values_m; Q_M1 = vars->Q_M1; l_min_values_m1 = vars->l_min_values_m1; l_max_values_m1 = vars->l_max_values_m1; k_min_values_m1 = vars->k_min_values_m1; k_max_values_m1 = vars->k_max_values_m1; Q_B_rem = vars->Q_B_rem; Q_M_rem = vars->Q_M_rem; Q_M1_rem = vars->Q_M1_rem; vars->Q_c_rem = 0.; vars->Q_cH_rem = 0.; vars->Q_cI_rem = 0.; vars->Q_cM_rem = 0.; /* construct qm2 matrix from qm1 entries */ #ifdef _OPENMP #pragma omp parallel for private(d, k, l, da, db, cnt1, cnt2, cnt3, cnt4) #endif for(k=1; k<seq_length-TURN-1; k++){ int k_min_m2, k_max_m2, l_min_m2, l_max_m2; int k_min_post_m2, k_max_post_m2, *l_min_post_m2, *l_max_post_m2; int update_m2 = 0; if(!vars->Q_M2[k]){ update_m2 = 1; k_min_m2 = l_min_m2 = 0; k_max_m2 = mm1[my_iindx[k]-seq_length] + referenceBPs1[my_iindx[k] - seq_length]; l_max_m2 = mm2[my_iindx[k]-seq_length] + referenceBPs2[my_iindx[k] - seq_length]; prepareBoundaries(k_min_m2, k_max_m2, l_min_m2, l_max_m2, bpdist[my_iindx[k]-seq_length], &vars->k_min_values_m2[k], &vars->k_max_values_m2[k], &vars->l_min_values_m2[k], &vars->l_max_values_m2[k] ); preparePosteriorBoundaries( vars->k_max_values_m2[k] - vars->k_min_values_m2[k] + 1, vars->k_min_values_m2[k], &k_min_post_m2, &k_max_post_m2, &l_min_post_m2, &l_max_post_m2 ); prepareArray( &vars->Q_M2[k], vars->k_min_values_m2[k], vars->k_max_values_m2[k], vars->l_min_values_m2[k], vars->l_max_values_m2[k] ); } /* construct Q_M2 */ for (l=k+TURN+1; l<seq_length-TURN-1; l++){ if(Q_M1_rem[jindx[l]+k]){ if(Q_M1[jindx[seq_length]+l+1]){ for(cnt1 = k_min_values_m1[jindx[seq_length]+l+1]; cnt1 <= k_max_values_m1[jindx[seq_length]+l+1]; cnt1++) for(cnt2 = l_min_values_m1[jindx[seq_length]+l+1][cnt1]; cnt2 <= l_max_values_m1[jindx[seq_length]+l+1][cnt1]; cnt2 += 2) vars->Q_M2_rem[k] += Q_M1_rem[jindx[l]+k] * Q_M1[jindx[seq_length]+l+1][cnt1][cnt2/2]; } if(Q_M1_rem[jindx[seq_length]+l+1]) vars->Q_M2_rem[k] += Q_M1_rem[jindx[l]+k] * Q_M1_rem[jindx[seq_length]+l+1]; } if(Q_M1_rem[jindx[seq_length]+l+1]){ if(Q_M1[jindx[l]+k]) for(cnt1 = k_min_values_m1[jindx[l]+k]; cnt1 <= k_max_values_m1[jindx[l]+k]; cnt1++) for(cnt2 = l_min_values_m1[jindx[l]+k][cnt1]; cnt2 <= 
l_max_values_m1[jindx[l]+k][cnt1]; cnt2 += 2)
              vars->Q_M2_rem[k] += Q_M1[jindx[l]+k][cnt1][cnt2/2]*Q_M1_rem[jindx[seq_length]+l+1];
      }
      if(vars->Q_M1[jindx[l]+k] && vars->Q_M1[jindx[seq_length] + l + 1]){
        da = referenceBPs1[my_iindx[k]-seq_length] - referenceBPs1[my_iindx[k]-l] - referenceBPs1[my_iindx[l+1]-seq_length];
        db = referenceBPs2[my_iindx[k]-seq_length] - referenceBPs2[my_iindx[k]-l] - referenceBPs2[my_iindx[l+1]-seq_length];
        for(cnt1 = k_min_values_m1[jindx[l]+k]; cnt1 <= k_max_values_m1[jindx[l]+k]; cnt1++)
          for(cnt2 = l_min_values_m1[jindx[l]+k][cnt1]; cnt2 <= l_max_values_m1[jindx[l]+k][cnt1]; cnt2+=2){
            for(cnt3 = k_min_values_m1[jindx[seq_length] + l + 1]; cnt3 <= k_max_values_m1[jindx[seq_length] + l + 1]; cnt3++)
              for(cnt4 = l_min_values_m1[jindx[seq_length] + l + 1][cnt3]; cnt4 <= l_max_values_m1[jindx[seq_length] + l + 1][cnt3]; cnt4+=2){
                if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){
                  vars->Q_M2[k][cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += Q_M1[jindx[l]+k][cnt1][cnt2/2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4/2];
                  if(update_m2){
                    updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_m2, &k_max_post_m2, &l_min_post_m2, &l_max_post_m2 );
                  }
                }
                else{
                  vars->Q_M2_rem[k] += Q_M1[jindx[l]+k][cnt1][cnt2/2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4/2];
                }
              }
          }
      }
    }
    if(update_m2){
      adjustArrayBoundaries(&vars->Q_M2[k], &vars->k_min_values_m2[k], &vars->k_max_values_m2[k], &vars->l_min_values_m2[k], &vars->l_max_values_m2[k], k_min_post_m2, k_max_post_m2, l_min_post_m2, l_max_post_m2 );
    }
  }
  base_d1 = referenceBPs1[my_iindx[1]-seq_length];
  base_d2 = referenceBPs2[my_iindx[1]-seq_length];
  int min_k, max_k, max_l, min_l;
  int min_k_real, max_k_real, min_k_real_qcH, max_k_real_qcH, min_k_real_qcI, max_k_real_qcI, min_k_real_qcM, max_k_real_qcM;
  int *min_l_real, *max_l_real, *min_l_real_qcH, *max_l_real_qcH, *min_l_real_qcI, *max_l_real_qcI, *min_l_real_qcM, *max_l_real_qcM;
  int update_c, update_cH, update_cI, update_cM;
  /* no class needs a boundary update unless its array is (re)created below */
  update_c = update_cH = update_cI = update_cM = 0;
  min_k = min_l = 0;
  max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
  max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];
#ifdef _OPENMP
#pragma omp sections
{
#pragma omp section
{
#endif
  if(!vars->Q_c){
    update_c = 1;
    prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &vars->k_min_values_qc, &vars->k_max_values_qc, &vars->l_min_values_qc, &vars->l_max_values_qc );
    prepareArray( &vars->Q_c, vars->k_min_values_qc, vars->k_max_values_qc, vars->l_min_values_qc, vars->l_max_values_qc );
    preparePosteriorBoundaries( max_k - min_k + 1, min_k, &min_k_real, &max_k_real, &min_l_real, &max_l_real );
  }
#ifdef _OPENMP
}
#pragma omp section
{
#endif
  if(!vars->Q_cH){
    update_cH = 1;
    prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &vars->k_min_values_qcH, &vars->k_max_values_qcH, &vars->l_min_values_qcH, &vars->l_max_values_qcH );
    prepareArray( &vars->Q_cH, vars->k_min_values_qcH, vars->k_max_values_qcH, vars->l_min_values_qcH, vars->l_max_values_qcH );
    preparePosteriorBoundaries( max_k - min_k + 1, min_k, &min_k_real_qcH, &max_k_real_qcH, &min_l_real_qcH, &max_l_real_qcH );
  }
#ifdef _OPENMP
}
#pragma omp section
{
#endif
  if(!vars->Q_cI){
    update_cI = 1;
    prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &vars->k_min_values_qcI, &vars->k_max_values_qcI, &vars->l_min_values_qcI, &vars->l_max_values_qcI );
    prepareArray( &vars->Q_cI, vars->k_min_values_qcI,
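                  /* note: Q_cI uses the same (k,l) boundary setup as Q_c,
                     Q_cH and Q_cM -- all four exterior-loop classes span
                     the full [1,n] interval */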
vars->k_max_values_qcI, vars->l_min_values_qcI, vars->l_max_values_qcI ); preparePosteriorBoundaries( max_k - min_k + 1, min_k, &min_k_real_qcI, &max_k_real_qcI, &min_l_real_qcI, &max_l_real_qcI ); } #ifdef _OPENMP } #pragma omp section { #endif if(!vars->Q_cM){ update_cM = 1; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &vars->k_min_values_qcM, &vars->k_max_values_qcM, &vars->l_min_values_qcM, &vars->l_max_values_qcM ); prepareArray( &vars->Q_cM, vars->k_min_values_qcM, vars->k_max_values_qcM, vars->l_min_values_qcM, vars->l_max_values_qcM ); preparePosteriorBoundaries( max_k - min_k + 1, min_k, &min_k_real_qcM, &max_k_real_qcM, &min_l_real_qcM, &max_l_real_qcM ); } #ifdef _OPENMP } } #endif for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */ #ifdef _OPENMP #pragma omp parallel for private(p, q, pq, k, l, kl, u, da, db, type, cnt1, cnt2, cnt3, cnt4) #endif for (q = d; q <= seq_length; q++) { FLT_OR_DBL qot; char loopseq[10]; p = q - d + 1; pq = my_iindx[p]-q; /* 1. get exterior hairpin contribution */ u = seq_length-q + p-1; if (u<TURN) continue; type = ptype[pq]; if (!type) continue; if(((type==3)||(type==4))&&no_closingGU) continue; /* cause we want to calc the exterior loops, we need the reversed pair type from now on */ type=rtype[type]; if (u<7){ strcpy(loopseq , sequence+q-1); strncat(loopseq, sequence, p); } /* get distance to reference if closing the hairpin * da = dbp(T1_[1,n}, T1_{p,q}) * db = dbp(T2_{1,n}, T2_{p,q}) */ da = base_d1 - referenceBPs1[pq]; db = base_d2 - referenceBPs2[pq]; qot = exp_E_Hairpin(u, type, S1[q+1], S1[p-1], loopseq, pf_params) * scale[u]; if(Q_B_rem[pq]) vars->Q_cH_rem += Q_B_rem[pq] * qot; if(Q_B[pq]){ for(cnt1 = k_min_values_b[pq]; cnt1 <= k_max_values_b[pq]; cnt1++) for(cnt2 = l_min_values_b[pq][cnt1]; cnt2 <= l_max_values_b[pq][cnt1]; cnt2 += 2){ if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){ vars->Q_cH[cnt1 + da][(cnt2 + db)/2] += Q_B[pq][cnt1][cnt2/2] * qot; if(update_cH){ updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real_qcH, &max_k_real_qcH, &min_l_real_qcH, &max_l_real_qcH ); } } else{ vars->Q_cH_rem += Q_B[pq][cnt1][cnt2/2] * qot; } } } /* 2. 
exterior interior loops, i "define" the (k,l) pair as "outer pair" */ /* so "outer type" is rtype[type[k,l]] and inner type is type[p,q] */ if(Q_B_rem[pq]) for(k=q+1; k < seq_length; k++){ unsigned int ln1, lstart, ln_pre; ln1 = k - q - 1; if(ln1+p-1>MAXLOOP) break; lstart = k + TURN + 1; ln_pre = ln1 + p + seq_length; if(ln_pre > lstart + MAXLOOP) lstart = ln_pre - MAXLOOP - 1; for(l=lstart;l <= seq_length; l++){ unsigned int ln2; int type2; kl = my_iindx[k]-l; ln2 = (p - 1) + (seq_length - l); if((ln1+ln2) > MAXLOOP) continue; type2 = ptype[kl]; if(!type2) continue; qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l+1], S1[k-1], S1[p-1], S1[q+1], pf_params) * scale[ln1+ln2]; if(Q_B_rem[kl]) vars->Q_cI_rem += Q_B_rem[pq] * Q_B_rem[kl] * qot; if(Q_B[kl]) for(cnt1 = k_min_values_b[kl]; cnt1 <= k_max_values_b[kl]; cnt1++) for(cnt2 = l_min_values_b[kl][cnt1]; cnt2 <= l_max_values_b[kl][cnt1]; cnt2 += 2) vars->Q_cI_rem += Q_B_rem[pq] * Q_B[kl][cnt1][cnt2/2] * qot; } } if(Q_B[pq]) for(k=q+1; k < seq_length; k++){ unsigned int ln1, lstart, ln_pre; ln1 = k - q - 1; if(ln1+p-1>MAXLOOP) break; lstart = k + TURN + 1; ln_pre = ln1 + p + seq_length; if(ln_pre > lstart + MAXLOOP) lstart = ln_pre - MAXLOOP - 1; for(l=lstart;l <= seq_length; l++){ unsigned int ln2; int type2; kl = my_iindx[k]-l; ln2 = (p - 1) + (seq_length - l); if((ln1+ln2) > MAXLOOP) continue; type2 = ptype[kl]; if(!type2) continue; qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l+1], S1[k-1], S1[p-1], S1[q+1], pf_params) * scale[ln1+ln2]; if(Q_B_rem[kl]){ for(cnt1 = k_min_values_b[pq]; cnt1 <= k_max_values_b[pq]; cnt1++) for(cnt2 = l_min_values_b[pq][cnt1]; cnt2 <= l_max_values_b[pq][cnt1]; cnt2 += 2) vars->Q_cI_rem += Q_B[pq][cnt1][cnt2/2] * Q_B_rem[kl] * qot; } if(!Q_B[kl]) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{k,l}) * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{k,l}) */ da = base_d1 - referenceBPs1[pq] - referenceBPs1[kl]; db = base_d2 - referenceBPs2[pq] - referenceBPs2[kl]; for(cnt1 = k_min_values_b[pq]; cnt1 <= k_max_values_b[pq]; cnt1++) for(cnt2 = l_min_values_b[pq][cnt1]; cnt2 <= l_max_values_b[pq][cnt1]; cnt2+=2) for(cnt3 = k_min_values_b[kl]; cnt3 <= k_max_values_b[kl]; cnt3++) for(cnt4 = l_min_values_b[kl][cnt3]; cnt4 <= l_max_values_b[kl][cnt3]; cnt4+=2){ if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){ vars->Q_cI[cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += Q_B[pq][cnt1][cnt2/2] * Q_B[kl][cnt3][cnt4/2] * qot; if(update_cI){ updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &min_k_real_qcI, &max_k_real_qcI, &min_l_real_qcI, &max_l_real_qcI ); } } else{ vars->Q_cI_rem += Q_B[pq][cnt1][cnt2/2] * Q_B[kl][cnt3][cnt4/2] * qot; } } } } } if(update_cH){ adjustArrayBoundaries(&vars->Q_cH, &vars->k_min_values_qcH, &vars->k_max_values_qcH, &vars->l_min_values_qcH, &vars->l_max_values_qcH, min_k_real_qcH, max_k_real_qcH, min_l_real_qcH, max_l_real_qcH ); } if(update_cI){ adjustArrayBoundaries(&vars->Q_cI, &vars->k_min_values_qcI, &vars->k_max_values_qcI, &vars->l_min_values_qcI, &vars->l_max_values_qcI, min_k_real_qcI, max_k_real_qcI, min_l_real_qcI, max_l_real_qcI ); } /* 3. 
Multiloops */
  if(seq_length > 2*TURN-3)
#ifdef _OPENMP
#pragma omp parallel for private(k, da, db, cnt1, cnt2, cnt3, cnt4)
#endif
  for(k=TURN+2; k<seq_length-2*TURN-3; k++){
    if(Q_M_rem[my_iindx[1]-k]){
      if(vars->Q_M2[k+1])
        for(cnt1 = vars->k_min_values_m2[k+1]; cnt1 <= vars->k_max_values_m2[k+1]; cnt1++)
          for(cnt2 = vars->l_min_values_m2[k+1][cnt1]; cnt2 <= vars->l_max_values_m2[k+1][cnt1]; cnt2 += 2)
            vars->Q_cM_rem += Q_M_rem[my_iindx[1]-k] * vars->Q_M2[k+1][cnt1][cnt2/2] * pf_params->expMLclosing;
      if(vars->Q_M2_rem[k+1])
        vars->Q_cM_rem += Q_M_rem[my_iindx[1]-k] * vars->Q_M2_rem[k+1] * pf_params->expMLclosing;
    }
    if(vars->Q_M2_rem[k+1]){
      if(Q_M[my_iindx[1]-k])
        for(cnt1 = k_min_values_m[my_iindx[1]-k]; cnt1 <= k_max_values_m[my_iindx[1]-k]; cnt1++)
          for(cnt2 = l_min_values_m[my_iindx[1]-k][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-k][cnt1]; cnt2 += 2)
            vars->Q_cM_rem += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * vars->Q_M2_rem[k+1] * pf_params->expMLclosing;
    }
    /* get distances to the references
     * d3a = dbp(T1_{1,n}, T1_{1,k} + T1_{k+1,n})
     * d3b = dbp(T2_{1,n}, T2_{1,k} + T2_{k+1,n})
     */
    da = base_d1 - referenceBPs1[my_iindx[1]-k] - referenceBPs1[my_iindx[k+1]-seq_length];
    db = base_d2 - referenceBPs2[my_iindx[1]-k] - referenceBPs2[my_iindx[k+1]-seq_length];
    if(Q_M[my_iindx[1]-k] && vars->Q_M2[k+1])
      for(cnt1 = k_min_values_m[my_iindx[1]-k]; cnt1 <= k_max_values_m[my_iindx[1]-k]; cnt1++)
        for(cnt2 = l_min_values_m[my_iindx[1]-k][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-k][cnt1]; cnt2+=2)
          for(cnt3 = vars->k_min_values_m2[k+1]; cnt3 <= vars->k_max_values_m2[k+1]; cnt3++)
            for(cnt4 = vars->l_min_values_m2[k+1][cnt3]; cnt4 <= vars->l_max_values_m2[k+1][cnt3]; cnt4+=2){
              if(((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)){
                vars->Q_cM[cnt1 + cnt3 + da][(cnt2 + cnt4 + db)/2] += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * vars->Q_M2[k+1][cnt3][cnt4/2] * pf_params->expMLclosing;
                if(update_cM){
                  updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &min_k_real_qcM, &max_k_real_qcM, &min_l_real_qcM, &max_l_real_qcM );
                }
              }
              else{
                vars->Q_cM_rem += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * vars->Q_M2[k+1][cnt3][cnt4/2] * pf_params->expMLclosing;
              }
            }
  }
  if(update_cM){
    adjustArrayBoundaries(&vars->Q_cM, &vars->k_min_values_qcM, &vars->k_max_values_qcM, &vars->l_min_values_qcM, &vars->l_max_values_qcM, min_k_real_qcM, max_k_real_qcM, min_l_real_qcM, max_l_real_qcM );
  }
  /* assemble Q_c from its exterior hairpin (Q_cH), interior-loop (Q_cI)
     and multibranch-loop (Q_cM) classes */
  for(cnt1 = vars->k_min_values_qcH; cnt1 <= vars->k_max_values_qcH; cnt1++)
    for(cnt2 = vars->l_min_values_qcH[cnt1]; cnt2 <= vars->l_max_values_qcH[cnt1]; cnt2 += 2){
      vars->Q_c[cnt1][cnt2/2] += vars->Q_cH[cnt1][cnt2/2];
      if(update_c){
        updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real );
      }
    }
  for(cnt1 = vars->k_min_values_qcI; cnt1 <= vars->k_max_values_qcI; cnt1++)
    for(cnt2 = vars->l_min_values_qcI[cnt1]; cnt2 <= vars->l_max_values_qcI[cnt1]; cnt2 += 2){
      vars->Q_c[cnt1][cnt2/2] += vars->Q_cI[cnt1][cnt2/2];
      if(update_c){
        updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real );
      }
    }
  for(cnt1 = vars->k_min_values_qcM; cnt1 <= vars->k_max_values_qcM; cnt1++)
    for(cnt2 = vars->l_min_values_qcM[cnt1]; cnt2 <= vars->l_max_values_qcM[cnt1]; cnt2 += 2){
      vars->Q_c[cnt1][cnt2/2] += vars->Q_cM[cnt1][cnt2/2];
      if(update_c){
        updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real );
      }
    }
  vars->Q_c_rem = vars->Q_cH_rem + vars->Q_cI_rem + vars->Q_cM_rem;
  /* add the case where the structure is the unfolded chain */
  if((referenceBPs1[my_iindx[1]-seq_length] <= maxD1) &&
(referenceBPs2[my_iindx[1]-seq_length] <= maxD2)){ vars->Q_c[referenceBPs1[my_iindx[1]-seq_length]][referenceBPs2[my_iindx[1]-seq_length]/2] += 1.0 * scale[seq_length]; if(update_c){ updatePosteriorBoundaries(referenceBPs1[my_iindx[1]-seq_length], referenceBPs2[my_iindx[1]-seq_length], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } else{ vars->Q_c_rem += 1.0 * scale[seq_length]; } adjustArrayBoundaries(&vars->Q_c, &vars->k_min_values_qc, &vars->k_max_values_qc, &vars->l_min_values_qc, &vars->l_max_values_qc, min_k_real, max_k_real, min_l_real, max_l_real ); } PRIVATE void make_ptypes2(TwoDpfold_vars *vars) { int n,i,j,k,l; n=vars->S[0]; for (k=1; k<n-TURN; k++) for (l=1; l<=2; l++) { int type,ntype=0,otype=0; i=k; j = i+TURN+l; if (j>n) continue; type = pair[vars->S[i]][vars->S[j]]; while ((i>=1)&&(j<=n)) { if ((i>1)&&(j<n)) ntype = pair[vars->S[i-1]][vars->S[j+1]]; if (noLonelyPairs && (!otype) && (!ntype)) type = 0; /* i.j can only form isolated pairs */ vars->ptype[vars->my_iindx[i]-j] = (char) type; otype = type; type = ntype; i--; j++; } } } PRIVATE void scale_pf_params2(TwoDpfold_vars *vars) { /* scale energy parameters and pre-calculate Boltzmann weights */ unsigned int i; double kT; if(vars->pf_params) free(vars->pf_params); vars->pf_params = get_scaled_pf_parameters(); vars->init_temp = vars->pf_params->temperature; vars->temperature = vars->init_temp; kT = vars->pf_params->kT; /* kT in cal/mol */ /* scaling factors (to avoid overflows) */ if (vars->pf_scale == -1) { /* mean energy for random sequences: 184.3*length cal */ vars->pf_scale = exp(-(-185+(vars->temperature-37.)*7.27)/kT); if (vars->pf_scale<1) vars->pf_scale=1; } vars->scale[0] = 1.; vars->scale[1] = 1./vars->pf_scale; for (i=2; i<=vars->seq_length; i++) { vars->scale[i] = vars->scale[i/2]*vars->scale[i-(i/2)]; } } /* * ################################################### * stochastic backtracking * ################################################### */ PUBLIC char *TwoDpfold_pbacktrack(TwoDpfold_vars *vars, int d1, int d2){ return TwoDpfold_pbacktrack5(vars, d1, d2, vars->seq_length); } PUBLIC char *TwoDpfold_pbacktrack5(TwoDpfold_vars *vars, int d1, int d2, unsigned int length){ char *pstruc, *ptype; short *S1; unsigned int i, j, n, start, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, ij, cnt1, cnt2, cnt3, cnt4, type, **l_min_values, **l_max_values, **l_min_values_b, **l_max_values_b, *k_min_values, *k_max_values, *k_min_values_b, *k_max_values_b; FLT_OR_DBL r, qt, *scale, ***Q, ***Q_B, *Q_rem, *Q_B_rem; pf_paramT *pf_params; n = vars->seq_length; if(vars->circ){ if(n != length) nrerror("pbacktrack@2Dfold.c: cotranscriptional backtracking for circular RNAs not supported!"); return TwoDpfold_pbacktrack_circ(vars, d1, d2); } pf_params = vars->pf_params; maxD1 = vars->maxD1; maxD2 = vars->maxD2; my_iindx = vars->my_iindx; scale = vars->scale; ptype = vars->ptype; S1 = vars->S1; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; Q = vars->Q; l_min_values = vars->l_min_values; l_max_values = vars->l_max_values; k_min_values = vars->k_min_values; k_max_values = vars->k_max_values; Q_B = vars->Q_B; l_min_values_b = vars->l_min_values_b; l_max_values_b = vars->l_max_values_b; k_min_values_b = vars->k_min_values_b; k_max_values_b = vars->k_max_values_b; Q_rem = vars->Q_rem; Q_B_rem = vars->Q_B_rem; if(length > n) nrerror("pbacktrack@2Dpfold.c: requested transcript length exceeds sequence length!"); #if 0 if(d1 > maxD1) nrerror("pbacktrack@2Dpfold.c: distance to 1st 
reference structure too high!");
  if(d2 > maxD2)
    nrerror("pbacktrack@2Dpfold.c: distance to 2nd reference structure too high!");
#endif

  /* check whether the chosen neighborhood exists at all */
  int dumb = 1;
  ij = my_iindx[1]-length;
  if((d1 == -1) && (Q_rem[ij] != 0.)) dumb = 0;
  else{
    if((k_min_values[ij] <= d1) && (k_max_values[ij] >= d1)){
      int l_min = l_min_values[ij][d1];
      if((d2 % 2) == (l_min%2))
        if((l_min <= d2) && (l_max_values[ij][d1] >= d2)) dumb = 0;
    }
  }
  if(dumb){
    fprintf(stderr, "neighborhood %d:%d is not in scope of calculated partition function!\n", d1, d2);
    nrerror("pbacktrack@2Dpfold.c: exiting cheerless...");
  }
  pstruc = space((length+1)*sizeof(char));
  for (i=0; i<length; i++) pstruc[i] = '.';
  pstruc[i] = '\0';
  start = 1;
  while (start<length) {
    int sn = my_iindx[start] - length;
    /* find i position of first pair */
    FLT_OR_DBL qln_i = 0, qln_i1 = 0;
    if(d1 == -1){
      qln_i = Q_rem[sn];
      /* open chain ? */
      if( (maxD1 > referenceBPs1[sn]) && (maxD2 > referenceBPs2[sn])){
        r = urn() * qln_i;
        if(scale[length-start+1] > r) return pstruc;
      }
      /* let's see if we find a base pair with i involved */
      for (i=start; i<length; i++) {
        r = urn() * qln_i;
        qln_i1 = Q_rem[my_iindx[i+1] - length];
        da = referenceBPs1[sn] - referenceBPs1[my_iindx[i+1] - length];
        db = referenceBPs2[sn] - referenceBPs2[my_iindx[i+1] - length];
        for(cnt1 = k_min_values[my_iindx[i+1] - length]; cnt1 <= k_max_values[my_iindx[i+1] - length]; cnt1++)
          for(cnt2 = l_min_values[my_iindx[i+1] - length][cnt1]; cnt2 <= l_max_values[my_iindx[i+1] - length][cnt1]; cnt2 += 2)
            if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){
              qln_i1 += Q[my_iindx[i+1] - length][cnt1][cnt2/2];
            }
        if(r > qln_i1*scale[1]) break;
        qln_i = qln_i1;
      }
      if (i>=length) break; /* no more pairs */
      /* i is paired, find pairing partner j */
      r = urn() * (qln_i - qln_i1*scale[1]);
      for (qt=0, j=i+TURN+1; j<length; j++) {
        ij = my_iindx[i]-j;
        type = ptype[ij];
        if (type) {
          cnt1 = cnt2 = cnt3 = cnt4 = -1;
          double qkl = exp_E_ExtLoop(type, (i>1) ? S1[i-1] : -1, S1[j+1], pf_params);
          if(Q_B_rem[ij] != 0.){
            if(Q_rem[my_iindx[j+1]-length] != 0.){
              qt += qkl * Q_B_rem[ij] * Q_rem[my_iindx[j+1]-length];
              if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem;
            }
            if(Q[my_iindx[j+1]-length])
              for(cnt3 = k_min_values[my_iindx[j+1]-length]; cnt3 <= k_max_values[my_iindx[j+1]-length]; cnt3++)
                for(cnt4 = l_min_values[my_iindx[j+1]-length][cnt3]; cnt4 <= l_max_values[my_iindx[j+1]-length][cnt3]; cnt4 += 2){
                  qt += qkl * Q_B_rem[ij] * Q[my_iindx[j+1]-length][cnt3][cnt4/2];
                  if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem;
                }
          }
          if(Q_rem[my_iindx[j+1]-length] != 0.){
            cnt3 = cnt4 = -1;
            if(Q_B[ij]){
              for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++)
                for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2){
                  qt += qkl * Q_B[ij][cnt1][cnt2/2] * Q_rem[my_iindx[j+1]-length];
                  if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem;
                }
            }
          }
          /* if we still search for pairing partner j, we go on here...
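             (qt accumulates the Boltzmann weights of all candidate
             decompositions; the first candidate pushing qt past the random
             threshold r is taken -- standard inverse-CDF sampling)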
*/ if(Q_B[ij] && Q[my_iindx[j+1]-length]){ da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j+1]-length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j+1]-length]; for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2) for(cnt3 = k_min_values[my_iindx[j+1]-length]; cnt3 <= k_max_values[my_iindx[j+1]-length]; cnt3++) for(cnt4 = l_min_values[my_iindx[j+1]-length][cnt3]; cnt4 <= l_max_values[my_iindx[j+1]-length][cnt3]; cnt4 += 2) if(((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){ qt += qkl * Q_B[ij][cnt1][cnt2/2] * Q[my_iindx[j+1]-length][cnt3][cnt4/2]; if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ } /* end for(j) */ cnt1 = cnt2 = cnt3 = cnt4 = -1; /* dont forget the case where i pairs with n */ j = length; ij = my_iindx[i]-j; type = ptype[ij]; if (type) { double qkl = exp_E_ExtLoop(type, (i>1) ? S1[i-1] : -1, (j<n) ? S1[j+1] : -1, pf_params); if(Q_B_rem[ij] != 0.){ qt += qkl * Q_B_rem[ij]; if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } /* if we still search for pairing partner j, we go on here... */ if(Q_B[ij]){ da = referenceBPs1[sn] - referenceBPs1[ij]; db = referenceBPs2[sn] - referenceBPs2[ij]; for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2) if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){ qt += qkl * Q_B[ij][cnt1][cnt2/2]; if(qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ j++; pbacktrack_ext_loop_early_escape_rem: if (j==length+1){ nrerror("pbacktrack@2Dpfold.c: backtracking failed in ext loop (rem)"); } /* finally start backtracking the first exterior stem */ backtrack(vars, pstruc, cnt1, cnt2, i,j); if(j==length) break; start = j+1; d1 = cnt3; d2 = cnt4; } /* end if d1 ==-1 */ else{ qln_i = Q[sn][d1][d2/2]; /* open chain ? */ if( (d1 == referenceBPs1[sn]) && (d2 == referenceBPs2[sn])){ r = urn() * qln_i; if(scale[length-start+1] > r) return pstruc; } for (i=start; i<length; i++) { r = urn() * qln_i; da = referenceBPs1[sn] - referenceBPs1[my_iindx[i+1] - length]; db = referenceBPs2[sn] - referenceBPs2[my_iindx[i+1] - length]; qln_i1 = 0; if(d1 >= da && d2 >= db) if( (d1-da >= k_min_values[my_iindx[i+1] - length]) && (d1 - da <= k_max_values[my_iindx[i+1] - length])) if( (d2 - db >= l_min_values[my_iindx[i+1] - length][d1 - da]) && (d2 - db <= l_max_values[my_iindx[i+1] - length][d1 - da])) qln_i1 += Q[my_iindx[i+1] - length][d1-da][(d2-db)/2]; if (r > qln_i1*scale[1]) break; /* i is paired */ qln_i = qln_i1; } if (i>=length) break; /* no more pairs */ /* now find the pairing partner j */ r = urn() * (qln_i - qln_i1*scale[1]); for (qt=0, j=i+1; j<length; j++) { int type; ij = my_iindx[i]-j; type = ptype[ij]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i>1) ? 
S1[i-1] : -1, S1[j+1], pf_params); da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j+1]-length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j+1]-length]; if( (d1 >= da) && (d2 >= db) && Q_B[ij] && Q[my_iindx[j+1]-length]) for(cnt1 = k_min_values_b[ij]; cnt1 <= MIN2(k_max_values_b[ij], d1-da); cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= MIN2(l_max_values_b[ij][cnt1], d2-db); cnt2+=2) if( (d1-da-cnt1 >= k_min_values[my_iindx[j+1]-length]) && (d1-da-cnt1 <= k_max_values[my_iindx[j+1]-length])) if( (d2 - db - cnt2 >= l_min_values[my_iindx[j+1]-length][d1-da-cnt1]) && (d2 - db - cnt2 <= l_max_values[my_iindx[j+1]-length][d1-da-cnt1])){ qt += qkl * Q_B[ij][cnt1][cnt2/2] * Q[my_iindx[j+1]-length][d1-da-cnt1][(d2-db-cnt2)/2]; if(qt >= r) goto pbacktrack_ext_loop_early_escape; } } } /* now dont forget the case j==n */ j = length; ij = my_iindx[i]-j; int type = ptype[ij]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i>1) ? S1[i-1] : -1, (j<n) ? S1[j+1] : -1, pf_params); da = referenceBPs1[sn] - referenceBPs1[ij]; db = referenceBPs2[sn] - referenceBPs2[ij]; if(d1 >= da && d2 >= db){ cnt1 = d1 - da; cnt2 = d2 - db; if((cnt1 >= k_min_values_b[ij]) && (cnt1 <= k_max_values_b[ij])) if((cnt2 >= l_min_values_b[ij][cnt1]) && (cnt2 <= l_max_values_b[ij][cnt1])){ qt += qkl * Q_B[ij][cnt1][cnt2/2]; if(qt >= r) goto pbacktrack_ext_loop_early_escape; /* j is paired */ } } } j++; pbacktrack_ext_loop_early_escape: if (j==length+1){ nrerror("pbacktrack@2Dpfold.c: backtracking failed in ext loop"); } backtrack(vars, pstruc, cnt1, cnt2, i,j); if(j==length) break; start = j+1; d1 -= cnt1 + da; d2 -= cnt2 + db; } /* end if d1!=-1 */ } return pstruc; } PRIVATE char *TwoDpfold_pbacktrack_circ(TwoDpfold_vars *vars, int d1, int d2){ char *pstruc; unsigned int i, n, maxD1, maxD2, *referenceBPs1, *referenceBPs2; int *my_iindx, k_min_values_qc, k_max_values_qc, k_min_values_qcH, k_max_values_qcH, k_min_values_qcI, k_max_values_qcI, k_min_values_qcM, k_max_values_qcM, *l_min_values_qc, *l_max_values_qc, *l_min_values_qcH, *l_max_values_qcH, *l_min_values_qcI, *l_max_values_qcI, *l_min_values_qcM, *l_max_values_qcM; FLT_OR_DBL r, *scale, qot, **Q_c, **Q_cH, **Q_cI, **Q_cM, Q_c_rem, Q_cH_rem, Q_cI_rem, Q_cM_rem; n = vars->seq_length; maxD1 = vars->maxD1; maxD2 = vars->maxD2; my_iindx = vars->my_iindx; scale = vars->scale; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; Q_c = vars->Q_c; l_min_values_qc = vars->l_min_values_qc; l_max_values_qc = vars->l_max_values_qc; k_min_values_qc = vars->k_min_values_qc; k_max_values_qc = vars->k_max_values_qc; Q_cH = vars->Q_cH; l_min_values_qcH = vars->l_min_values_qcH; l_max_values_qcH = vars->l_max_values_qcH; k_min_values_qcH = vars->k_min_values_qcH; k_max_values_qcH = vars->k_max_values_qcH; Q_cI = vars->Q_cI; l_min_values_qcI = vars->l_min_values_qcI; l_max_values_qcI = vars->l_max_values_qcI; k_min_values_qcI = vars->k_min_values_qcI; k_max_values_qcI = vars->k_max_values_qcI; Q_cM = vars->Q_cM; l_min_values_qcM = vars->l_min_values_qcM; l_max_values_qcM = vars->l_max_values_qcM; k_min_values_qcM = vars->k_min_values_qcM; k_max_values_qcM = vars->k_max_values_qcM; Q_c_rem = vars->Q_c_rem; Q_cH_rem = vars->Q_cH_rem; Q_cI_rem = vars->Q_cI_rem; Q_cM_rem = vars->Q_cM_rem; /* check whether the chosen neighborhood exists at all */ int dumb = 1; if((d1 == -1) && (Q_c_rem != 0.)) dumb = 0; else{ if((k_min_values_qc <= d1) && (k_max_values_qc >= d1)){ int l_min = l_min_values_qc[d1]; if((d2 % 2) == 
(l_min%2)) if((l_min <= d2) && (l_max_values_qc[d1] >= d2)) dumb = 0; } } if(dumb){ fprintf(stderr, "neighborhood %d:%d is not in scope of calculated partition function!\n", d1, d2); nrerror("pbacktrack_circ@2Dpfold.c: exiting cheerless..."); } pstruc = space((n+1)*sizeof(char)); for (i=0; i<n; i++) pstruc[i] = '.'; pstruc[i] = '\0'; /* now we come to the actual backtracking process */ qot = 0.; /* backtrack in rest-partition */ if(d1 == -1){ r = urn() * Q_c_rem; /* open chain ? */ if((referenceBPs1[my_iindx[1]-n] > maxD1) || (referenceBPs2[my_iindx[1]-n] > maxD2)){ qot = 1.0 * scale[n]; if(qot >= r) goto pbacktrack_circ_escape; } qot += Q_cH_rem; if(qot >= r){ backtrack_qcH(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } qot += Q_cI_rem; if(qot >= r){ backtrack_qcI(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } qot += Q_cM_rem; if(qot >= r){ backtrack_qcM(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } nrerror("pbacktrack_circ@2Dpfold.c: backtracking failed in exterior loop! Exiting cheerless..."); } /* normal backtracking */ else{ r = urn() * Q_c[d1][d2/2]; /* open chain ? */ if((referenceBPs1[my_iindx[1]-n] == d1) && (referenceBPs2[my_iindx[1]-n] == d2)){ qot += 1.0 * scale[n]; if(qot >= r) goto pbacktrack_circ_escape; } /* exterior hairpin loop ? */ if((k_min_values_qcH <= d1) && (k_max_values_qcH >= d1)){ int l_min = l_min_values_qcH[d1]; if((d2 % 2) == (l_min%2)) if((l_min <= d2) && (l_max_values_qcH[d1] >= d2)){ qot += Q_cH[d1][d2/2]; if(qot >= r){ backtrack_qcH(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } /* exterior interior loop ? */ if((k_min_values_qcI <= d1) && (k_max_values_qcI >= d1)){ int l_min = l_min_values_qcI[d1]; if((d2 % 2) == (l_min%2)) if((l_min <= d2) && (l_max_values_qcI[d1] >= d2)){ qot += Q_cI[d1][d2/2]; if(qot >= r){ backtrack_qcI(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } /* exterior multibranch loop ? 
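     (contributions of the form QM(1,k) * QM2(k+1,n) * expMLclosing;
     backtrack_qcM() below re-samples the split point k)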
*/ if((k_min_values_qcM <= d1) && (k_max_values_qcM >= d1)){ int l_min = l_min_values_qcM[d1]; if((d2 % 2) == (l_min%2)) if((l_min <= d2) && (l_max_values_qcM[d1] >= d2)){ qot += Q_cM[d1][d2/2]; if(qot >= r){ backtrack_qcM(vars, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } } pbacktrack_circ_escape: return pstruc; } PRIVATE void backtrack_qcH(TwoDpfold_vars *vars, char *pstruc, int d1, int d2){ char *ptype, *sequence; short *S1; unsigned int i, j, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int u, *my_iindx, ij, cnt1, cnt2,type, **l_min_values_b, **l_max_values_b, *k_min_values_b, *k_max_values_b; FLT_OR_DBL r, qt, *scale, qot, ***Q_B, **Q_cH, *Q_B_rem, Q_cH_rem; pf_paramT *pf_params; pf_params = vars->pf_params; sequence = vars->sequence; n = vars->seq_length; my_iindx = vars->my_iindx; scale = vars->scale; ptype = vars->ptype; S1 = vars->S1; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; maxD1 = vars->maxD1; maxD2 = vars->maxD2; Q_B_rem = vars->Q_B_rem; Q_B = vars->Q_B; l_min_values_b = vars->l_min_values_b; l_max_values_b = vars->l_max_values_b; k_min_values_b = vars->k_min_values_b; k_max_values_b = vars->k_max_values_b; Q_cH_rem = vars->Q_cH_rem; Q_cH = vars->Q_cH; qot = qt = 0.; base_d1 = referenceBPs1[my_iindx[1]-n]; base_d2 = referenceBPs2[my_iindx[1]-n]; if(d1 == -1){ r = urn() * Q_cH_rem; for(i=1;i<n;i++) for(j=i+TURN+1;j<=n;j++){ char loopseq[10]; ij = my_iindx[i]-j; u = n-j + i-1; if (u<TURN) continue; type = ptype[ij]; if (!type) continue; if(((type==3)||(type==4))&&no_closingGU) continue; type=rtype[type]; if (u<7){ strcpy(loopseq , sequence+j-1); strncat(loopseq, sequence, i); } qt = exp_E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, pf_params) * scale[u]; if(Q_B_rem[ij]){ qot += Q_B_rem[ij] * qt; if(qot >= r){ backtrack(vars, pstruc, d1, d2, i, j); return; } } da = base_d1 - referenceBPs1[ij]; db = base_d2 - referenceBPs2[ij]; if(Q_B[ij]){ for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2){ if( ((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){ qot += Q_B[ij][cnt1][cnt2/2] * qt; if(qot >= r){ backtrack(vars, pstruc, cnt1, cnt2, i, j); return; } } } } } } else{ r = urn() * Q_cH[d1][d2/2]; for(i=1;i<n;i++) for(j=i+TURN+1;j<=n;j++){ char loopseq[10]; ij = my_iindx[i]-j; if(!Q_B[ij]) continue; u = n-j + i-1; if (u<TURN) continue; type = ptype[ij]; if (!type) continue; if(((type==3)||(type==4))&&no_closingGU) continue; type=rtype[type]; if (u<7){ strcpy(loopseq , sequence+j-1); strncat(loopseq, sequence, i); } qt = exp_E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, pf_params) * scale[u]; da = base_d1 - referenceBPs1[ij]; db = base_d2 - referenceBPs2[ij]; for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2){ if( ((cnt1 + da) == d1) && ((cnt2 + db) == d2)){ qot += Q_B[ij][cnt1][cnt2/2] * qt; if(qot >= r){ backtrack(vars, pstruc, cnt1, cnt2, i, j); return; } } } } } nrerror("backtrack_qcH@2Dpfold.c: failed to find closing pair!"); } PRIVATE void backtrack_qcI(TwoDpfold_vars *vars, char *pstruc, int d1, int d2){ char *ptype; short *S1; unsigned int i, j, ij, p, q, pq, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, cnt1, cnt2, cnt3, cnt4, type, **l_min_values_b, **l_max_values_b, *k_min_values_b, *k_max_values_b; FLT_OR_DBL r, qt, *scale, qot, ***Q_B, *Q_B_rem, **Q_cI, Q_cI_rem; pf_paramT 
*pf_params;
  pf_params = vars->pf_params;
  n = vars->seq_length;
  my_iindx = vars->my_iindx;
  scale = vars->scale;
  ptype = vars->ptype;
  S1 = vars->S1;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;
  Q_B = vars->Q_B;
  l_min_values_b = vars->l_min_values_b;
  l_max_values_b = vars->l_max_values_b;
  k_min_values_b = vars->k_min_values_b;
  k_max_values_b = vars->k_max_values_b;
  Q_cI = vars->Q_cI;
  Q_B_rem = vars->Q_B_rem;
  Q_cI_rem = vars->Q_cI_rem;
  qot = qt = 0.;
  base_d1 = referenceBPs1[my_iindx[1]-n];
  base_d2 = referenceBPs2[my_iindx[1]-n];
  if(d1 == -1){
    r = urn() * Q_cI_rem;
    for(i=1;i<n;i++)
      for(j=i+TURN+1;j<=n;j++){
        ij = my_iindx[i]-j;
        type = rtype[(unsigned int)ptype[ij]];
        if(!type) continue; /* skip pairs (i,j) that cannot form */
        if(Q_B_rem[ij])
          for(p=j+1; p < n; p++){
            unsigned int ln1, qstart, ln_pre;
            ln1 = p - j - 1;
            if(ln1+i-1>MAXLOOP) break;
            qstart = p + TURN + 1;
            ln_pre = ln1 + i + n;
            if(ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1;
            for(q=qstart;q <= n; q++){
              unsigned int ln2;
              int type2;
              pq = my_iindx[p]-q;
              ln2 = (i - 1) + (n - q);
              if((ln1+ln2) > MAXLOOP) continue;
              type2 = ptype[pq];
              if(!type2) continue;
              qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[q+1], S1[p-1], S1[i-1], S1[j+1], pf_params) * scale[ln1 + ln2];
              if(Q_B_rem[pq]){
                qot += Q_B_rem[ij] * Q_B_rem[pq] * qt;
                if(qot > r){
                  backtrack(vars, pstruc, d1, d2, i, j);
                  backtrack(vars, pstruc, d1, d2, p, q);
                  return;
                }
              }
              if(Q_B[pq])
                for(cnt1 = k_min_values_b[pq]; cnt1 <= k_max_values_b[pq]; cnt1++)
                  for(cnt2 = l_min_values_b[pq][cnt1]; cnt2 <= l_max_values_b[pq][cnt1]; cnt2 += 2){
                    qot += Q_B_rem[ij] * Q_B[pq][cnt1][cnt2/2] * qt;
                    if(qot > r){
                      backtrack(vars, pstruc, d1, d2, i, j);
                      backtrack(vars, pstruc, cnt1, cnt2, p, q);
                      return;
                    }
                  }
            }
          }
        if(Q_B[ij]){
          for(p=j+1; p < n; p++){
            unsigned int ln1, qstart, ln_pre;
            ln1 = p - j - 1;
            if(ln1+i-1>MAXLOOP) break;
            qstart = p + TURN + 1;
            ln_pre = ln1 + i + n;
            if(ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1;
            for(q=qstart;q <= n; q++){
              unsigned int ln2;
              int type2;
              pq = my_iindx[p]-q;
              ln2 = (i - 1) + (n - q);
              if((ln1+ln2) > MAXLOOP) continue;
              type2 = ptype[pq];
              if(!type2) continue;
              qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[q+1], S1[p-1], S1[i-1], S1[j+1], pf_params) * scale[ln1 + ln2];
              if(Q_B_rem[pq])
                for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++)
                  for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2){
                    qot += Q_B[ij][cnt1][cnt2/2] * Q_B_rem[pq] * qt;
                    if(qot > r){
                      backtrack(vars, pstruc, cnt1, cnt2, i, j);
                      backtrack(vars, pstruc, d1, d2, p, q);
                      return;
                    }
                  }
              if(Q_B[pq]){
                da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++)
                  for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2)
                    for(cnt3 = k_min_values_b[pq]; cnt3 <= k_max_values_b[pq]; cnt3++)
                      for(cnt4 = l_min_values_b[pq][cnt3]; cnt4 <= l_max_values_b[pq][cnt3]; cnt4 += 2){
                        if( ((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){
                          qot += Q_B[ij][cnt1][cnt2/2] * Q_B[pq][cnt3][cnt4/2] * qt;
                          if(qot > r){
                            backtrack(vars, pstruc, cnt1, cnt2, i, j);
                            backtrack(vars, pstruc, cnt3, cnt4, p, q);
                            return;
                          }
                        }
                      }
              }
            }
          }
        }
      }
  }
  else{
    r = urn() * Q_cI[d1][d2/2];
    for(i=1;i<n;i++)
      for(j=i+TURN+1;j<=n;j++){
        ij = my_iindx[i]-j;
        type = rtype[(unsigned int)ptype[ij]];
        if(!type) continue; /* skip pairs (i,j) that cannot form */
        if(!Q_B[ij]) continue;
        for(p=j+1; p < n; p++){
          unsigned int ln1, qstart, ln_pre;
          ln1 = p - j - 1;
          if(ln1+i-1>MAXLOOP) break;
          qstart = p
+ TURN + 1; ln_pre = ln1 + i + n; if(ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1; for(q=qstart;q <= n; q++){ unsigned int ln2; int type2; pq = my_iindx[p]-q; if(!Q_B[pq]) continue; ln2 = (i - 1) + (n - q); if((ln1+ln2) > MAXLOOP) continue; type2 = ptype[pq]; if(!type2) continue; qt = exp_E_IntLoop( ln2, ln1, rtype[type2], type, S1[q+1], S1[p-1], S1[i-1], S1[j+1], pf_params) * scale[ln1 + ln2]; da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; for(cnt1 = k_min_values_b[ij]; cnt1 <= k_max_values_b[ij]; cnt1++) for(cnt2 = l_min_values_b[ij][cnt1]; cnt2 <= l_max_values_b[ij][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_b[pq]; cnt3 <= k_max_values_b[pq]; cnt3++) for(cnt4 = l_min_values_b[pq][cnt3]; cnt4 <= l_max_values_b[pq][cnt3]; cnt4 += 2){ if( ((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)){ qot += Q_B[ij][cnt1][cnt2/2] * Q_B[pq][cnt3][cnt4/2] * qt; if(qot > r){ backtrack(vars, pstruc, cnt1, cnt2, i, j); backtrack(vars, pstruc, cnt3, cnt4, p, q); return; } } } } } } } } PRIVATE void backtrack_qcM(TwoDpfold_vars *vars, char *pstruc, int d1, int d2){ unsigned int k, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, cnt1, cnt2, cnt3, cnt4, **l_min_values_m, **l_max_values_m, **l_min_values_m2, **l_max_values_m2, *k_min_values_m, *k_max_values_m, *k_min_values_m2, *k_max_values_m2; FLT_OR_DBL r, qt, qot, ***Q_M, ***Q_M2, **Q_cM, *Q_M_rem, *Q_M2_rem, Q_cM_rem; pf_paramT *pf_params; pf_params = vars->pf_params; n = vars->seq_length; my_iindx = vars->my_iindx; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; maxD1 = vars->maxD1; maxD2 = vars->maxD2; Q_cM = vars->Q_cM; Q_M = vars->Q_M; l_min_values_m = vars->l_min_values_m; l_max_values_m = vars->l_max_values_m; k_min_values_m = vars->k_min_values_m; k_max_values_m = vars->k_max_values_m; Q_M2 = vars->Q_M2; l_min_values_m2 = vars->l_min_values_m2; l_max_values_m2 = vars->l_max_values_m2; k_min_values_m2 = vars->k_min_values_m2; k_max_values_m2 = vars->k_max_values_m2; Q_cM_rem = vars->Q_cM_rem; Q_M_rem = vars->Q_M_rem; Q_M2_rem = vars->Q_M2_rem; base_d1 = referenceBPs1[my_iindx[1]-n]; base_d2 = referenceBPs2[my_iindx[1]-n]; qot = qt = 0.; if(d1 == -1){ r = urn() * Q_cM_rem; for(k = TURN + 2; k < n - 2 * TURN - 3; k++){ if(Q_M_rem[my_iindx[1]-k]){ if(Q_M2[k+1]) for(cnt1 = k_min_values_m2[k+1]; cnt1 <= k_max_values_m2[k+1]; cnt1++) for(cnt2 = l_min_values_m2[k+1][cnt1]; cnt2 <= l_max_values_m2[k+1][cnt1]; cnt2 += 2){ qot += Q_M_rem[my_iindx[1]-k] * Q_M2[k+1][cnt1][cnt2/2] * pf_params->expMLclosing; if(qot > r){ backtrack_qm(vars, pstruc, d1, d2, 1, k); backtrack_qm2(vars, pstruc, cnt1, cnt2, k+1); return; } } if(Q_M2_rem[k+1]){ qot += Q_M_rem[my_iindx[1]-k] * Q_M2_rem[k+1] * pf_params->expMLclosing; if(qot > r){ backtrack_qm(vars, pstruc, d1, d2, 1, k); backtrack_qm2(vars, pstruc, d1, d2, k+1); return; } } } if(Q_M2_rem[k+1]){ if(Q_M[my_iindx[1]-k]) for(cnt1 = k_min_values_m[my_iindx[1]-k]; cnt1 <= k_max_values_m[my_iindx[1]-k]; cnt1++) for(cnt2 = l_min_values_m[my_iindx[1]-k][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-k][cnt1]; cnt2 += 2){ qot += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * Q_M2_rem[k+1] * pf_params->expMLclosing; if(qot > r){ backtrack_qm(vars, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vars, pstruc, d1, d2, k+1); return; } } } da = base_d1 - referenceBPs1[my_iindx[1]-k] - referenceBPs1[my_iindx[k+1]-n]; db = base_d2 - referenceBPs2[my_iindx[1]-k] - referenceBPs2[my_iindx[k+1]-n]; if( Q_M[my_iindx[1]-k] 
&& Q_M2[k+1]) for(cnt1 = k_min_values_m[my_iindx[1]-k]; cnt1 <= k_max_values_m[my_iindx[1]-k]; cnt1++) for(cnt2 = l_min_values_m[my_iindx[1]-k][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-k][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_m2[k+1]; cnt3 <= k_max_values_m2[k+1]; cnt3++) for(cnt4 = l_min_values_m2[k+1][cnt3]; cnt4 <= l_max_values_m2[k+1][cnt3]; cnt4 += 2){ if( ((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){ qot += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * Q_M2[k+1][cnt3][cnt4/2] * pf_params->expMLclosing; if(qot > r){ backtrack_qm(vars, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vars, pstruc, cnt3, cnt4, k+1); return; } } } } } else{ r = urn() * Q_cM[d1][d2/2]; for(k = TURN + 2; k < n - 2 * TURN - 3; k++){ da = base_d1 - referenceBPs1[my_iindx[1]-k] - referenceBPs1[my_iindx[k+1]-n]; db = base_d2 - referenceBPs2[my_iindx[1]-k] - referenceBPs2[my_iindx[k+1]-n]; if( Q_M[my_iindx[1]-k] && Q_M2[k+1]) for(cnt1 = k_min_values_m[my_iindx[1]-k]; cnt1 <= k_max_values_m[my_iindx[1]-k]; cnt1++) for(cnt2 = l_min_values_m[my_iindx[1]-k][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-k][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_m2[k+1]; cnt3 <= k_max_values_m2[k+1]; cnt3++) for(cnt4 = l_min_values_m2[k+1][cnt3]; cnt4 <= l_max_values_m2[k+1][cnt3]; cnt4 += 2) if( ((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)){ qot += Q_M[my_iindx[1]-k][cnt1][cnt2/2] * Q_M2[k+1][cnt3][cnt4/2] * pf_params->expMLclosing; if(qot > r){ backtrack_qm(vars, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vars, pstruc, cnt3, cnt4, k+1); return; } } } } nrerror("backtrack_qcM@2Dpfold.c: backtracking failed"); } PRIVATE void backtrack_qm2( TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int k){ unsigned int l, n, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, *k_min_values_m1, *k_max_values_m1, **l_min_values_m1, **l_max_values_m1; FLT_OR_DBL r, qt, qot, ***Q_M2, ***Q_M1, *Q_M2_rem, *Q_M1_rem; n = vars->seq_length; my_iindx = vars->my_iindx; jindx = vars->jindx; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; maxD1 = vars->maxD1; maxD2 = vars->maxD2; Q_M1_rem = vars->Q_M1_rem; Q_M1 = vars->Q_M1; l_min_values_m1 = vars->l_min_values_m1; l_max_values_m1 = vars->l_max_values_m1; k_min_values_m1 = vars->k_min_values_m1; k_max_values_m1 = vars->k_max_values_m1; Q_M2_rem = vars->Q_M2_rem; Q_M2 = vars->Q_M2; qot = qt = 0.; if(d1 == -1){ r = urn() * Q_M2_rem[k]; for (l=k+TURN+1; l<n-TURN-1; l++){ if(Q_M1_rem[jindx[l]+k]){ if(Q_M1[jindx[n]+l+1]){ for(cnt1 = k_min_values_m1[jindx[n]+l+1]; cnt1 <= k_max_values_m1[jindx[n]+l+1]; cnt1++) for(cnt2 = l_min_values_m1[jindx[n]+l+1][cnt1]; cnt2 <= l_max_values_m1[jindx[n]+l+1][cnt1]; cnt2 += 2){ qot += Q_M1_rem[jindx[l]+k] * Q_M1[jindx[n]+l+1][cnt1][cnt2/2]; if(qot > r){ backtrack_qm1(vars, pstruc, d1, d2, k, l); backtrack_qm1(vars, pstruc, cnt1, cnt2, l+1, n); return; } } } if(Q_M1_rem[jindx[n]+l+1]){ qot += Q_M1_rem[jindx[l]+k] * Q_M1_rem[jindx[n]+l+1]; if(qot > r){ backtrack_qm1(vars, pstruc, d1, d2, k, l); backtrack_qm1(vars, pstruc, d1, d2, l+1, n); return; } } } if(Q_M1_rem[jindx[n]+l+1]){ if(Q_M1[jindx[l]+k]) for(cnt1 = k_min_values_m1[jindx[l]+k]; cnt1 <= k_max_values_m1[jindx[l]+k]; cnt1++) for(cnt2 = l_min_values_m1[jindx[l]+k][cnt1]; cnt2 <= l_max_values_m1[jindx[l]+k][cnt1]; cnt2 += 2){ qot += Q_M1[jindx[l]+k][cnt1][cnt2/2] * Q_M1_rem[jindx[n]+l+1]; if(qot > r){ backtrack_qm1(vars, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vars, pstruc, d1, d2, l+1, n); return; } } } 
if(!Q_M1[jindx[l]+k]) continue; if(!Q_M1[jindx[n] + l + 1]) continue; da = referenceBPs1[my_iindx[k]-n] - referenceBPs1[my_iindx[k]-l] - referenceBPs1[my_iindx[l+1]-n]; db = referenceBPs2[my_iindx[k]-n] - referenceBPs2[my_iindx[k]-l] - referenceBPs2[my_iindx[l+1]-n]; for(cnt1 = k_min_values_m1[jindx[l]+k]; cnt1 <= k_max_values_m1[jindx[l]+k]; cnt1++) for(cnt2 = l_min_values_m1[jindx[l]+k][cnt1]; cnt2 <= l_max_values_m1[jindx[l]+k][cnt1]; cnt2 += 2){ for(cnt3 = k_min_values_m1[jindx[n] + l + 1]; cnt3 <= k_max_values_m1[jindx[n] + l + 1]; cnt3++) for(cnt4 = l_min_values_m1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_values_m1[jindx[n] + l + 1][cnt3]; cnt4 += 2){ if( ((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){ qot += Q_M1[jindx[l]+k][cnt1][cnt2/2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4/2]; if(qot > r){ backtrack_qm1(vars, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vars, pstruc, cnt3, cnt4, l+1, n); return; } } } } } } else{ r = urn() * Q_M2[k][d1][d2/2]; for (l=k+TURN+1; l<n-TURN-1; l++){ if(!Q_M1[jindx[l]+k]) continue; if(!Q_M1[jindx[n] + l + 1]) continue; da = referenceBPs1[my_iindx[k]-n] - referenceBPs1[my_iindx[k]-l] - referenceBPs1[my_iindx[l+1]-n]; db = referenceBPs2[my_iindx[k]-n] - referenceBPs2[my_iindx[k]-l] - referenceBPs2[my_iindx[l+1]-n]; for(cnt1 = k_min_values_m1[jindx[l]+k]; cnt1 <= k_max_values_m1[jindx[l]+k]; cnt1++) for(cnt2 = l_min_values_m1[jindx[l]+k][cnt1]; cnt2 <= l_max_values_m1[jindx[l]+k][cnt1]; cnt2 += 2){ for(cnt3 = k_min_values_m1[jindx[n] + l + 1]; cnt3 <= k_max_values_m1[jindx[n] + l + 1]; cnt3++) for(cnt4 = l_min_values_m1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_values_m1[jindx[n] + l + 1][cnt3]; cnt4 += 2){ if( ((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)){ qot += Q_M1[jindx[l]+k][cnt1][cnt2/2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4/2]; if(qot > r){ backtrack_qm1(vars, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vars, pstruc, cnt3, cnt4, l+1, n); return; } } } } } } nrerror("backtrack_qm2@2Dpfold.c: backtracking failed"); } PRIVATE void backtrack(TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j) { FLT_OR_DBL *scale; unsigned int maxD1, maxD2, base_d1, base_d2, da, db; unsigned int *referenceBPs1, *referenceBPs2; pf_paramT *pf_params; /* holds all [unscaled] pf parameters */ char *ptype, *sequence; short *S1, *reference_pt1, *reference_pt2; int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4; pf_params = vars->pf_params; sequence = vars->sequence; maxD1 = vars->maxD1; maxD2 = vars->maxD2; my_iindx = vars->my_iindx; jindx = vars->jindx; scale = vars->scale; ptype = vars->ptype; S1 = vars->S1; reference_pt1 = vars->reference_pt1; reference_pt2 = vars->reference_pt2; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1, *Q_B_rem, *Q_M_rem, *Q_M1_rem; int *k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1,*k_min_values_b, *k_max_values_b; int **l_min_values_m, **l_max_values_m,**l_min_values_m1, **l_max_values_m1,**l_min_values_b, **l_max_values_b; Q_B = vars->Q_B; k_min_values_b = vars->k_min_values_b; k_max_values_b = vars->k_max_values_b; l_min_values_b = vars->l_min_values_b; l_max_values_b = vars->l_max_values_b; Q_M = vars->Q_M; k_min_values_m = vars->k_min_values_m; k_max_values_m = vars->k_max_values_m; l_min_values_m = vars->l_min_values_m; l_max_values_m = vars->l_max_values_m; Q_M1 = vars->Q_M1; k_min_values_m1 = vars->k_min_values_m1; k_max_values_m1 = vars->k_max_values_m1; l_min_values_m1 = vars->l_min_values_m1; l_max_values_m1 = 
vars->l_max_values_m1; Q_B_rem = vars->Q_B_rem; Q_M_rem = vars->Q_M_rem; Q_M1_rem = vars->Q_M1_rem; do { double r, qbt1 = 0.; unsigned int k, l, u, u1; int type; pstruc[i-1] = '('; pstruc[j-1] = ')'; r = 0.; ij = my_iindx[i]-j; if(d1 == -1){ r= urn() * Q_B_rem[ij]; if(r == 0.) nrerror("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[ij]; u = j-i-1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /* hairpin ? */ if((da > maxD1) || (db > maxD2)) if(!(((type==3)||(type==4))&&no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i+1], S1[j-1], sequence+i-1, pf_params) * scale[u+2]; if (qbt1>=r) return; /* found the hairpin we're done */ /* lets see if we form an interior loop */ for (k=i+1; k<=MIN2(i+MAXLOOP+1,j-TURN-2); k++) { unsigned int u_pre, lmin; u1 = k-i-1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if(u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l=lmin; l<j; l++) { int type_2; type_2 = ptype[my_iindx[k]-l]; if (type_2) { cnt1 = cnt2 = -1; da = base_d1 + referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[k]-l]; db = base_d2 + referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[k]-l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j-l-1, type, type_2, S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params) * scale[u1+j-l+1]; if(Q_B_rem[my_iindx[k]-l] != 0.){ qbt1 += Q_B_rem[my_iindx[k]-l] * tmp_en; if(qbt1 > r) goto backtrack_int_early_escape_rem; } if(Q_B[my_iindx[k]-l]) for(cnt1 = k_min_values_b[my_iindx[k]-l]; cnt1 <= k_max_values_b[my_iindx[k]-l]; cnt1++) for(cnt2 = l_min_values_b[my_iindx[k]-l][cnt1]; cnt2 <= l_max_values_b[my_iindx[k]-l][cnt1]; cnt2 += 2) if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){ qbt1 += Q_B[my_iindx[k]-l][cnt1][cnt2/2] * tmp_en; if(qbt1 > r) goto backtrack_int_early_escape_rem; } } } } backtrack_int_early_escape_rem: if (l<j) { i=k; j=l; d1 = cnt1; d2 = cnt2; } else break; } else{ if((d1 >= k_min_values_b[ij]) && (d1 <= k_max_values_b[ij])) if((d2 >= l_min_values_b[ij][d1]) && (d2 <= l_max_values_b[ij][d1])) r = urn() * Q_B[ij][d1][d2/2]; if(r == 0.) nrerror("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[ij]; u = j-i-1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 
1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /*hairpin contribution*/ if((da == d1) && (db == d2)) if(!(((type==3)||(type==4))&&no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i+1], S1[j-1], sequence+i-1, pf_params) * scale[u+2]; if (qbt1>=r) return; /* found the hairpin we're done */ for (k=i+1; k<=MIN2(i+MAXLOOP+1,j-TURN-2); k++) { unsigned int u_pre, lmin; u1 = k-i-1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if(u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l=lmin; l<j; l++) { int type_2; type_2 = ptype[my_iindx[k]-l]; if (type_2) { da = base_d1 + referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[k]-l]; db = base_d2 + referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[k]-l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j-l-1, type, type_2, S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params) * scale[u1+j-l+1]; if(d1 >= da && d2 >= db) if((d1 - da >= k_min_values_b[my_iindx[k]-l]) && (d1 - da <= k_max_values_b[my_iindx[k]-l])) if((d2 - db >= l_min_values_b[my_iindx[k]-l][d1 - da]) && (d2 - db <= l_max_values_b[my_iindx[k]-l][d1 - da])){ cnt1 = d1 - da; cnt2 = d2 - db; qbt1 += Q_B[my_iindx[k]-l][cnt1][cnt2/2] * tmp_en; if(qbt1 > r) goto backtrack_int_early_escape; } } } } backtrack_int_early_escape: if (l<j) { i=k; j=l; d1 = cnt1; d2 = cnt2; } else break; } } while (1); /* backtrack in multi-loop */ { double r, qt; unsigned int k, ii, jj; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; base_d1 += referenceBPs1[my_iindx[i]-j]; base_d2 += referenceBPs2[my_iindx[i]-j]; i++; j--; /* find the first split index */ ii = my_iindx[i]; /* ii-j=[i,j] */ jj = jindx[j]; /* jj+i=[j,i] */ if(d1 == -1){ /* get total contribution for current part */ for (qt=0., k=i+1; k<j; k++){ if(Q_M_rem[ii-k+1] != 0.){ if(Q_M1[jj+k]) for(cnt1 = k_min_values_m1[jj+k]; cnt1 <= k_max_values_m1[jj+k]; cnt1++) for(cnt2 = l_min_values_m1[jj+k][cnt1]; cnt2 <= l_max_values_m1[jj+k][cnt1]; cnt2 += 2) qt += Q_M_rem[ii-k+1] * Q_M1[jj+k][cnt1][cnt2/2]; if(Q_M1_rem[jj+k] != 0.) 
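                /* rem * rem: distance components are additive, so a product
                   with a rem-class factor stays outside the (maxD1, maxD2)
                   bounds as well */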
qt += Q_M_rem[ii-k+1] * Q_M1_rem[jj+k]; } if(Q_M1_rem[jj+k] != 0.){ if(Q_M[ii-k+1]) for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= l_max_values_m[ii-k+1][cnt1]; cnt2 += 2) qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1_rem[jj+k]; } /* calculate introduced distance to reference structures */ if(!Q_M[ii-k+1]) continue; if(!Q_M1[jj+k]) continue; da = base_d1 - referenceBPs1[my_iindx[i]-k+1] - referenceBPs1[my_iindx[k]-j]; db = base_d2 - referenceBPs2[my_iindx[i]-k+1] - referenceBPs2[my_iindx[k]-j]; /* collect all contributing energies */ for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= l_max_values_m[ii-k+1][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_m1[jj+k]; cnt3 <= k_max_values_m1[jj+k]; cnt3++) for(cnt4 = l_min_values_m1[jj+k][cnt3]; cnt4 <= l_max_values_m1[jj+k][cnt3]; cnt4 += 2) if(((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1[jj+k][cnt3][cnt4/2]; } /* throw the dice */ r = urn() * qt; for (qt=0., k=i+1; k<j; k++) { cnt1 = cnt2 = cnt3 = cnt4 = -1; if(Q_M_rem[ii-k+1] != 0.){ if(Q_M1_rem[jj+k] != 0){ qt += Q_M_rem[ii-k+1] * Q_M1_rem[jj+k]; if(qt >= r) goto backtrack_ml_early_escape; } if(Q_M1[jj+k]) for(cnt3 = k_min_values_m1[jj+k]; cnt3 <= k_max_values_m1[jj+k]; cnt3++) for(cnt4 = l_min_values_m1[jj+k][cnt3]; cnt4 <= l_max_values_m1[jj+k][cnt3]; cnt4 += 2){ qt += Q_M_rem[ii-k+1] * Q_M1[jj+k][cnt3][cnt4/2]; if(qt >= r) goto backtrack_ml_early_escape; } } if(Q_M1_rem[jj+k] != 0.){ cnt3 = cnt4 = -1; if(Q_M[ii-k+1]) for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= l_max_values_m[ii-k+1][cnt1]; cnt2 += 2){ qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1_rem[jj+k]; if(qt >= r) goto backtrack_ml_early_escape; } } /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i]-k+1] - referenceBPs1[my_iindx[k]-j]; db = base_d2 - referenceBPs2[my_iindx[i]-k+1] - referenceBPs2[my_iindx[k]-j]; /* collect all contributing energies */ if(!Q_M[ii-k+1]) continue; if(!Q_M1[jj+k]) continue; for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= k_max_values_m[ii-k+1]; cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= l_max_values_m[ii-k+1][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_m1[jj+k]; cnt3 <= k_max_values_m1[jj+k]; cnt3++) for(cnt4 = l_min_values_m1[jj+k][cnt3]; cnt4 <= l_max_values_m1[jj+k][cnt3]; cnt4 += 2) if(((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){ qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1[jj+k][cnt3][cnt4/2]; if (qt>=r) goto backtrack_ml_early_escape; } } } else{ /* get total contribution */ for (qt=0., k=i+1; k<j; k++){ /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i]-k+1] - referenceBPs1[my_iindx[k]-j]; db = base_d2 - referenceBPs2[my_iindx[i]-k+1] - referenceBPs2[my_iindx[k]-j]; /* collect all contributing energies */ if(d1 >= da && d2 >= db && Q_M[ii-k+1] && Q_M1[jj+k]) for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= MIN2(k_max_values_m[ii-k+1], d1-da); cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= MIN2(l_max_values_m[ii-k+1][cnt1], d2 - db); cnt2+=2) if((d1-cnt1-da >= k_min_values_m1[jj+k]) && (d1-cnt1-da <= k_max_values_m1[jj+k])) if((d2 - cnt2 - db >= l_min_values_m1[jj+k][d1-da-cnt1]) && (d2 - cnt2 - db <= l_max_values_m1[jj+k][d1-cnt1-da])) qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1[jj+k][d1-da-cnt1][(d2-db-cnt2)/2]; } r = 
urn() * qt; for (qt=0., k=i+1; k<j; k++) { /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i]-k+1] - referenceBPs1[my_iindx[k]-j]; db = base_d2 - referenceBPs2[my_iindx[i]-k+1] - referenceBPs2[my_iindx[k]-j]; /* collect all contributing energies */ if(d1 >= da && d2 >= db && Q_M[ii-k+1] && Q_M1[jj+k]) for(cnt1 = k_min_values_m[ii-k+1]; cnt1 <= MIN2(k_max_values_m[ii-k+1], d1-da); cnt1++) for(cnt2 = l_min_values_m[ii-k+1][cnt1]; cnt2 <= MIN2(l_max_values_m[ii-k+1][cnt1], d2 - db); cnt2+=2) if((d1-cnt1-da >= k_min_values_m1[jj+k]) && (d1-cnt1-da <= k_max_values_m1[jj+k])) if((d2 - cnt2 - db >= l_min_values_m1[jj+k][d1-da-cnt1]) && (d2 - cnt2 - db <= l_max_values_m1[jj+k][d1-cnt1-da])){ cnt3 = d1-da-cnt1; cnt4 = d2-db-cnt2; qt += Q_M[ii-k+1][cnt1][cnt2/2] * Q_M1[jj+k][cnt3][cnt4/2]; if (qt>=r) goto backtrack_ml_early_escape; } } } if (k>=j) nrerror("backtrack failed, can't find split index "); backtrack_ml_early_escape: backtrack_qm1(vars, pstruc, cnt3, cnt4, k, j); j = k-1; backtrack_qm(vars, pstruc, cnt1, cnt2, i, j); } } PRIVATE void backtrack_qm1(TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j){ /* i is paired to l, i<l<j; backtrack in qm1 to find l */ FLT_OR_DBL r, qt, *scale; unsigned int maxD1, maxD2, da, db; unsigned int *referenceBPs1, *referenceBPs2; pf_paramT *pf_params; /* holds all [unscaled] pf parameters */ char *ptype; short *S1; int *my_iindx, *jindx, cnt1, cnt2; pf_params = vars->pf_params; maxD1 = vars->maxD1; maxD2 = vars->maxD2; my_iindx = vars->my_iindx; jindx = vars->jindx; scale = vars->scale; ptype = vars->ptype; S1 = vars->S1; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; FLT_OR_DBL ***Q_B, ***Q_M1, *Q_B_rem, *Q_M1_rem; int *k_min_values_m1, *k_max_values_m1,*k_min_values_b, *k_max_values_b; int **l_min_values_m1, **l_max_values_m1,**l_min_values_b, **l_max_values_b; Q_B = vars->Q_B; k_min_values_b = vars->k_min_values_b; k_max_values_b = vars->k_max_values_b; l_min_values_b = vars->l_min_values_b; l_max_values_b = vars->l_max_values_b; Q_M1 = vars->Q_M1; k_min_values_m1 = vars->k_min_values_m1; k_max_values_m1 = vars->k_max_values_m1; l_min_values_m1 = vars->l_min_values_m1; l_max_values_m1 = vars->l_max_values_m1; Q_B_rem = vars->Q_B_rem; Q_M1_rem = vars->Q_M1_rem; unsigned int ii, l; int type; /* find qm1 contribution */ if(d1 == -1) r = urn() * Q_M1_rem[jindx[j]+i]; else{ if((d1 >= k_min_values_m1[jindx[j]+i]) && (d1 <= k_max_values_m1[jindx[j]+i])) if((d2 >= l_min_values_m1[jindx[j]+i][d1]) && (d2 <= l_max_values_m1[jindx[j]+i][d1])) r = urn() * Q_M1[jindx[j]+i][d1][d2/2]; } if(r == 0.) 
nrerror("backtrack_qm1@2Dpfold.c: backtracking failed\n"); ii = my_iindx[i]; for (qt=0., l=i+TURN+1; l<=j; l++) { type = ptype[ii-l]; if (type){ FLT_OR_DBL tmp = exp_E_MLstem(type, S1[i-1], S1[l+1], pf_params) * pow(pf_params->expMLbase, j-l) * scale[j-l]; /* compute the introduced distance to reference structures */ da = referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[i]-l]; db = referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[i]-l]; cnt1 = cnt2 = -1; if(d1 == -1){ if(Q_B_rem[ii-l] != 0.){ qt += Q_B_rem[ii-l] * tmp; if(qt >= r) goto backtrack_qm1_early_escape; } if(Q_B[ii-l]) for(cnt1 = k_min_values_b[ii-l]; cnt1 <= k_max_values_b[ii-l]; cnt1++) for(cnt2 = l_min_values_b[ii-l][cnt1]; cnt2 <= l_max_values_b[ii-l][cnt1]; cnt2 += 2) if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){ qt += Q_B[ii-l][cnt1][cnt2/2] * tmp; if(qt >= r) goto backtrack_qm1_early_escape; } } else{ /* get energy contributions */ if(d1 >= da && d2 >= db) if((d1 - da >= k_min_values_b[ii-l]) && (d1 - da <= k_max_values_b[ii-l])) if((d2 - db >= l_min_values_b[ii-l][d1-da]) && (d2 - db <= l_max_values_b[ii-l][d1-da])){ cnt1 = d1 - da; cnt2 = d2 - db; qt += Q_B[ii-l][cnt1][cnt2/2] * tmp; if (qt>=r) goto backtrack_qm1_early_escape; } } } } if (l>j) nrerror("backtrack failed in qm1"); backtrack_qm1_early_escape: backtrack(vars, pstruc, cnt1, cnt2, i,l); } PRIVATE void backtrack_qm(TwoDpfold_vars *vars, char *pstruc, int d1, int d2, unsigned int i, unsigned int j){ /* divide multiloop into qm and qm1 */ FLT_OR_DBL r, *scale; unsigned int maxD1, maxD2, da, db, da2, db2; unsigned int *referenceBPs1, *referenceBPs2; pf_paramT *pf_params; /* holds all [unscaled] pf parameters */ int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4; pf_params = vars->pf_params; maxD1 = vars->maxD1; maxD2 = vars->maxD2; my_iindx = vars->my_iindx; jindx = vars->jindx; scale = vars->scale; referenceBPs1 = vars->referenceBPs1; referenceBPs2 = vars->referenceBPs2; FLT_OR_DBL ***Q_M, ***Q_M1, *Q_M_rem, *Q_M1_rem; int *k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1; int **l_min_values_m, **l_max_values_m,**l_min_values_m1, **l_max_values_m1; Q_M = vars->Q_M; k_min_values_m = vars->k_min_values_m; k_max_values_m = vars->k_max_values_m; l_min_values_m = vars->l_min_values_m; l_max_values_m = vars->l_max_values_m; Q_M1 = vars->Q_M1; k_min_values_m1 = vars->k_min_values_m1; k_max_values_m1 = vars->k_max_values_m1; l_min_values_m1 = vars->l_min_values_m1; l_max_values_m1 = vars->l_max_values_m1; Q_M_rem = vars->Q_M_rem; Q_M1_rem = vars->Q_M1_rem; double qmt = 0; unsigned int k; while(j>i){ /* now backtrack [i ... j] in qm[] */ /* find qm contribution */ if(d1 == -1) r = urn() * Q_M_rem[my_iindx[i]-j]; else{ if(Q_M[my_iindx[i]-j]) if((d1 >= k_min_values_m[my_iindx[i]-j]) && (d1 <= k_max_values_m[my_iindx[i]-j])) if((d2 >= l_min_values_m[my_iindx[i]-j][d1]) && (d2 <= l_max_values_m[my_iindx[i]-j][d1])) r = urn() * Q_M[my_iindx[i]-j][d1][d2/2]; } if(r == 0.) 
nrerror("backtrack_qm@2Dpfold.c: backtracking failed in finding qm contribution\n"); qmt = 0.; if(d1 == -1){ if(Q_M1_rem[jindx[j]+i] != 0.){ qmt += Q_M1_rem[jindx[j]+i]; if(qmt >= r){ backtrack_qm1(vars, pstruc, d1, d2, i, j); return; } } for(k=i+1; k<=j; k++){ FLT_OR_DBL tmp = pow(pf_params->expMLbase, k-i) * scale[k-i]; if(Q_M1_rem[jindx[j]+k] != 0.){ qmt += Q_M1_rem[jindx[j]+k] * tmp; if(qmt >= r){ backtrack_qm1(vars, pstruc, d1, d2, k, j); return; } } da2 = referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[k]-j]; db2 = referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[k]-j]; if(Q_M1[jindx[j]+k]) for(cnt1 = k_min_values_m1[jindx[j]+k]; cnt1 <= k_max_values_m1[jindx[j]+k]; cnt1++) for(cnt2 = l_min_values_m1[jindx[j]+k][cnt1]; cnt2 <= l_max_values_m1[jindx[j]+k][cnt1]; cnt2 += 2) if(((cnt1 + da2) > maxD1) || ((cnt2 + db2) > maxD2)){ qmt += Q_M1[jindx[j]+k][cnt1][cnt2/2] * tmp; if(qmt >= r){ backtrack_qm1(vars, pstruc, cnt1, cnt2, k, j); return; } } da = da2 - referenceBPs1[my_iindx[i]-k+1]; db = db2 - referenceBPs2[my_iindx[i]-k+1]; cnt1 = cnt2 = cnt3 = cnt4 = -1; if(Q_M_rem[my_iindx[i]-k+1] != 0.){ if(Q_M1_rem[jindx[j]+k] != 0.){ qmt += Q_M_rem[my_iindx[i]-k+1] * Q_M1_rem[jindx[j]+k]; if(qmt >= r) goto backtrack_qm_early_escape; } if(Q_M1[jindx[j]+k]) for(cnt3 = k_min_values_m1[jindx[j]+k]; cnt3 <= k_max_values_m1[jindx[j]+k]; cnt3++) for(cnt4 = l_min_values_m1[jindx[j]+k][cnt3]; cnt4 <= l_max_values_m1[jindx[j]+k][cnt3]; cnt4 += 2){ qmt += Q_M_rem[my_iindx[i]-k+1] * Q_M1[jindx[j]+k][cnt3][cnt4/2]; if(qmt >= r) goto backtrack_qm_early_escape; } } if(Q_M1_rem[jindx[j]+k] != 0.){ cnt3 = cnt4 = -1; if(Q_M[my_iindx[i]-k+1]) for(cnt1 = k_min_values_m[my_iindx[i]-k+1]; cnt1 <= k_max_values_m[my_iindx[i]-k+1]; cnt1++) for(cnt2 = l_min_values_m[my_iindx[i]-k+1][cnt1]; cnt2 <= l_max_values_m[my_iindx[i]-k+1][cnt1]; cnt2 += 2){ qmt += Q_M[my_iindx[i]-k+1][cnt1][cnt2/2] * Q_M1_rem[jindx[j]+k]; if(qmt >= r) goto backtrack_qm_early_escape; } } if(!Q_M[my_iindx[i]-k+1]) continue; if(!Q_M1[jindx[j]+k]) continue; for(cnt1 = k_min_values_m[my_iindx[i]-k+1]; cnt1 <= k_max_values_m[my_iindx[i]-k+1]; cnt1++) for(cnt2 = l_min_values_m[my_iindx[i]-k+1][cnt1]; cnt2 <= l_max_values_m[my_iindx[i]-k+1][cnt1]; cnt2 += 2) for(cnt3 = k_min_values_m1[jindx[j]+k]; cnt3 <= k_max_values_m1[jindx[j]+k]; cnt3++) for(cnt4 = l_min_values_m1[jindx[j]+k][cnt3]; cnt4 <= l_max_values_m1[jindx[j]+k][cnt3]; cnt4 += 2) if(((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)){ qmt += Q_M[my_iindx[i]-k+1][cnt1][cnt2/2] * Q_M1[jindx[j]+k][cnt3][cnt4/2]; if(qmt >= r) goto backtrack_qm_early_escape; } } } else{ /* find corresponding qm1 contribution */ if(Q_M1[jindx[j]+i]) if((d1 >= k_min_values_m1[jindx[j]+i]) && (d1 <= k_max_values_m1[jindx[j]+i])) if((d2 >= l_min_values_m1[jindx[j]+i][d1]) && (d2 <= l_max_values_m1[jindx[j]+i][d1])){ qmt = Q_M1[jindx[j]+i][d1][d2/2]; } k=i; if(qmt<r){ for(k=i+1; k<=j; k++){ /* calculate introduced distancies to reference structures */ da2 = referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[k]-j]; db2 = referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[k]-j]; da = da2 - referenceBPs1[my_iindx[i]-k+1]; db = db2 - referenceBPs2[my_iindx[i]-k+1]; FLT_OR_DBL tmp = pow(pf_params->expMLbase, k-i) * scale[k-i]; /* collect unpaired + qm1 contributions */ if(d1 >= da2 && d2 >= db2) if((d1 - da2 >= k_min_values_m1[jindx[j]+k]) && (d1 - da2 <= k_max_values_m1[jindx[j]+k])) if((d2 - db2 >= l_min_values_m1[jindx[j]+k][d1-da2]) && (d2 - db2 <= l_max_values_m1[jindx[j]+k][d1-da2])){ cnt3 = 
d1-da2; cnt4 = d2-db2; qmt += Q_M1[jindx[j]+k][cnt3][cnt4/2] * tmp; if(qmt >= r){ backtrack_qm1(vars, pstruc, cnt3, cnt4, k, j); return; } } /* collect qm + qm1 contributions */ if(d1 >= da && d2 >= db && Q_M[my_iindx[i]-k+1] && Q_M1[jindx[j]+k]) for(cnt1 = k_min_values_m[my_iindx[i]-k+1]; cnt1 <= MIN2(k_max_values_m[my_iindx[i]-k+1], d1 - da); cnt1++) for(cnt2 = l_min_values_m[my_iindx[i]-k+1][cnt1]; cnt2 <= MIN2(l_max_values_m[my_iindx[i]-k+1][cnt1], d2 - db); cnt2+=2) if((d1 - da - cnt1 >= k_min_values_m1[jindx[j]+k]) && (d1 - da - cnt1 <= k_max_values_m1[jindx[j]+k])) if((d2 - db - cnt2 >= l_min_values_m1[jindx[j]+k][d1-da-cnt1]) && (d2 - db - cnt2 <= l_max_values_m1[jindx[j]+k][d1-da-cnt1])){ cnt3 = d1 - da - cnt1; cnt4 = d2 - db - cnt2; qmt += Q_M[my_iindx[i]-k+1][cnt1][cnt2/2] * Q_M1[jindx[j]+k][cnt3][cnt4/2]; if(qmt >= r) goto backtrack_qm_early_escape; } } } else{ backtrack_qm1(vars, pstruc, d1, d2, k, j); return; } } if(k>j) nrerror("backtrack_qm@2Dpfold.c: backtrack failed in qm"); backtrack_qm_early_escape: backtrack_qm1(vars, pstruc, cnt3, cnt4, k, j); if(k<i+TURN) break; /* no more pairs */ d1 = cnt1; d2 = cnt2; if(d1 == referenceBPs1[my_iindx[i]-k+1] && d2 == referenceBPs2[my_iindx[i]-k+1]){ /* is interval [i,k] totally unpaired? */ FLT_OR_DBL tmp = pow(pf_params->expMLbase, k-i) * scale[k-i]; r = urn() * (Q_M[my_iindx[i]-k+1][d1][d2/2] + tmp); if(tmp >= r) return; /* no more pairs */ } j = k-1; } } PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post){ int cnt1; int k_diff_pre = k_min_post - *k_min; int mem_size = k_max_post - k_min_post + 1; if(k_min_post < INF){ /* free all the unused memory behind actual data */ for(cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++){ (*array)[cnt1] += (*l_min)[cnt1]/2; free((*array)[cnt1]); } /* free unused memory before actual data */ for(cnt1 = *k_min; cnt1 < k_min_post; cnt1++){ (*array)[cnt1] += (*l_min)[cnt1]/2; free((*array)[cnt1]); } /* move data to front and thereby eliminating unused memory in front of actual data */ if(k_diff_pre > 0){ memmove((FLT_OR_DBL **)(*array),((FLT_OR_DBL **)(*array)) + k_diff_pre, sizeof(FLT_OR_DBL *) * mem_size); memmove((int *) (*l_min),((int *) (*l_min)) + k_diff_pre, sizeof(int) * mem_size); memmove((int *) (*l_max),((int *) (*l_max)) + k_diff_pre, sizeof(int) * mem_size); } /* reallocating memory to actual size used */ *array += *k_min; *array = (FLT_OR_DBL **)realloc(*array, sizeof(FLT_OR_DBL *) * mem_size); *array -= k_min_post; *l_min += *k_min; *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size); *l_min -= k_min_post; *l_max += *k_min; *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size); *l_max -= k_min_post; for(cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++){ if(l_min_post[cnt1] < INF){ /* new memsize */ mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1)/2 + 1; /* reshift the pointer */ (*array)[cnt1] += (*l_min)[cnt1]/2; int shift = (l_min_post[cnt1]%2 == (*l_min)[cnt1]%2) ? 
0 : 1; /* eliminate unused memory in front of actual data */ unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1])/2 + shift; if(start > 0) memmove((FLT_OR_DBL *)((*array)[cnt1]), (FLT_OR_DBL *)((*array)[cnt1])+start, sizeof(FLT_OR_DBL) * mem_size); (*array)[cnt1] = (FLT_OR_DBL *) realloc((*array)[cnt1], sizeof(FLT_OR_DBL) * mem_size); (*array)[cnt1] -= l_min_post[cnt1]/2; } else{ /* free according memory */ (*array)[cnt1] += (*l_min)[cnt1]/2; free((*array)[cnt1]); } (*l_min)[cnt1] = l_min_post[cnt1]; (*l_max)[cnt1] = l_max_post[cnt1]; } } else{ /* we have to free all unused memory */ for(cnt1 = *k_min; cnt1 <= *k_max; cnt1++){ (*array)[cnt1] += (*l_min)[cnt1]/2; free((*array)[cnt1]); } (*l_min) += *k_min; (*l_max) += *k_min; free(*l_min); free(*l_max); (*array) += *k_min; free(*array); *array = NULL; } l_min_post += *k_min; l_max_post += *k_min; *k_min = k_min_post; *k_max = k_max_post; free(l_min_post); free(l_max_post); } INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l){ int i; *min_k = INF; *max_k = 0; *min_l = (int *)space(sizeof(int) * size); *max_l = (int *)space(sizeof(int) * size); for(i = 0; i < size; i++){ (*min_l)[i] = INF; (*max_l)[i] = 0; } *min_l -= shift; *max_l -= shift; } INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l){ (*min_l)[d1] = MIN2((*min_l)[d1], d2); (*max_l)[d1] = MAX2((*max_l)[d1], d2); *min_k = MIN2(*min_k, d1); *max_k = MAX2(*max_k, d1); } INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l){ int cnt; int mem = max_k_pre - min_k_pre + 1; *min_k = min_k_pre; *max_k = max_k_pre; *min_l = (int *) space(sizeof(int) * mem); *max_l = (int *) space(sizeof(int) * mem); *min_l -= min_k_pre; *max_l -= min_k_pre; /* for each k guess the according minimum l*/ for(cnt = min_k_pre; cnt <= max_k_pre; cnt++){ (*min_l)[cnt] = min_l_pre; (*max_l)[cnt] = max_l_pre; while((*min_l)[cnt] + cnt < bpdist) (*min_l)[cnt]++; if((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2)) (*min_l)[cnt]++; } } INLINE PRIVATE void prepareArray(FLT_OR_DBL ***array, int min_k, int max_k, int *min_l, int *max_l){ int i, mem; *array = (FLT_OR_DBL **)space(sizeof(FLT_OR_DBL *) * (max_k - min_k + 1)); *array -= min_k; for(i = min_k; i <= max_k; i++){ mem = (max_l[i] - min_l[i] + 1)/2 + 1; (*array)[i] = (FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * mem); (*array)[i] -= min_l[i]/2; } }
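/*
 * Note on the sampling pattern used by backtrack_qm() and backtrack_qm1()
 * above: each routine first accumulates the total Boltzmann weight qt of all
 * admissible decompositions of the current interval, then draws
 * r = urn() * qt and re-scans the same decompositions, jumping to its
 * early-escape label at the first decomposition whose running weight reaches
 * r. A decomposition is therefore chosen with probability proportional to
 * its contribution to the partition function.
 */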
hello_omp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

void Hello_thread(void);

int main(int argc, char *argv[]) {
   int thread_count = 4;   /* default team size when no argument is given */

   if (argc < 2)
      printf("usage: hello_omp <number threads>\n");
   else {
      sscanf(argv[1], "%d", &thread_count);
      printf("Will use %d threads for test\n", thread_count);
   }

   /* fork a team of thread_count threads; each runs Hello_thread() */
#pragma omp parallel num_threads(thread_count)
   Hello_thread();

   return 0;
}

void Hello_thread(void) {
   int my_rank = omp_get_thread_num();
   int thread_count = omp_get_num_threads();

   printf("Hello from OMP thread %d of %d\n", my_rank, thread_count);
}
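/*
 * Typical build and run with an OpenMP-capable compiler (output interleaving
 * varies between runs):
 *
 *   gcc -fopenmp hello_omp.c -o hello_omp
 *   ./hello_omp 8
 */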
mrcore.c
/*************************************************************************** * Copyright 2013 CertiVox IOM Ltd. * * This file is part of CertiVox MIRACL Crypto SDK. * * The CertiVox MIRACL Crypto SDK provides developers with an * extensive and efficient set of cryptographic functions. * For further information about its features and functionalities please * refer to http://www.certivox.com * * * The CertiVox MIRACL Crypto SDK is free software: you can * redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the * Free Software Foundation, either version 3 of the License, * or (at your option) any later version. * * * The CertiVox MIRACL Crypto SDK is distributed in the hope * that it will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License for more details. * * * You should have received a copy of the GNU Affero General Public * License along with CertiVox MIRACL Crypto SDK. * If not, see <http://www.gnu.org/licenses/>. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the CertiVox MIRACL Crypto SDK * without disclosing the source code of your own applications, or shipping * the CertiVox MIRACL Crypto SDK with a closed source product. * * ***************************************************************************/ /* * * MIRACL Core module - contains initialisation code and general purpose * utilities * mrcore.c * * Space can be saved by removing unneeded functions (mr_and ?) * */ #include "miracl.h" #include <stdlib.h> #include <string.h> #ifdef MR_FP #include <math.h> #endif /*** Multi-Threaded Support ***/ #ifndef MR_GENERIC_MT #ifdef MR_OPENMP_MT #include <omp.h> #define MR_MIP_EXISTS miracl *mr_mip; #pragma omp threadprivate(mr_mip) miracl *get_mip() { return mr_mip; } void mr_init_threading() { } void mr_end_threading() { } #endif #ifdef MR_WINDOWS_MT #include <windows.h> DWORD mr_key; miracl *get_mip() { return (miracl *)TlsGetValue(mr_key); } void mr_init_threading() { mr_key=TlsAlloc(); } void mr_end_threading() { TlsFree(mr_key); } #endif #ifdef MR_UNIX_MT #include <pthread.h> pthread_key_t mr_key; miracl *get_mip() { return (miracl *)pthread_getspecific(mr_key); } void mr_init_threading() { pthread_key_create(&mr_key,(void(*)(void *))NULL); } void mr_end_threading() { pthread_key_delete(mr_key); } #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT #ifdef MR_STATIC miracl mip; miracl *mr_mip=&mip; #else miracl *mr_mip=NULL; /* MIRACL's one and only global variable */ #endif #define MR_MIP_EXISTS miracl *get_mip() { return (miracl *)mr_mip; } #endif #endif #endif #ifdef MR_MIP_EXISTS void set_mip(miracl *mip) { mr_mip=mip; } #endif #endif /* See Advanced Windows by Jeffrey Richter, Chapter 12 for methods for creating different instances of this global for each executing thread when using Windows '95/NT */ #ifdef MR_STATIC #if MIRACL==8 static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,0}; #else static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211, 223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331, 
337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449, 457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587, 593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709, 719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853, 857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991, 997,0}; #endif #endif #ifndef MR_STRIPPED_DOWN #ifndef MR_NO_STANDARD_IO static char *names[] = {(char *)"your program",(char *)"innum",(char *)"otnum",(char *)"jack",(char *)"normalise", (char *)"multiply",(char *)"divide",(char *)"incr",(char *)"decr",(char *)"premult", (char *)"subdiv",(char *)"fdsize",(char *)"egcd",(char *)"cbase", (char *)"cinnum",(char *)"cotnum",(char *)"nroot",(char *)"power", (char *)"powmod",(char *)"bigdig",(char *)"bigrand",(char *)"nxprime",(char *)"isprime", (char *)"mirvar",(char *)"mad",(char *)"multi_inverse",(char *)"putdig", (char *)"add",(char *)"subtract",(char *)"mirsys",(char *)"xgcd", (char *)"fpack",(char *)"dconv",(char *)"mr_shift",(char *)"mround",(char *)"fmul", (char *)"fdiv",(char *)"fadd",(char *)"fsub",(char *)"fcomp",(char *)"fconv", (char *)"frecip",(char *)"fpmul",(char *)"fincr",(char *)"",(char *)"ftrunc", (char *)"frand",(char *)"sftbit",(char *)"build",(char *)"logb2",(char *)"expint", (char *)"fpower",(char *)"froot",(char *)"fpi",(char *)"fexp",(char *)"flog",(char *)"fpowf", (char *)"ftan",(char *)"fatan",(char *)"fsin",(char *)"fasin",(char *)"fcos",(char *)"facos", (char *)"ftanh",(char *)"fatanh",(char *)"fsinh",(char *)"fasinh",(char *)"fcosh", (char *)"facosh",(char *)"flop",(char *)"gprime",(char *)"powltr",(char *)"fft_mult", (char *)"crt_init",(char *)"crt",(char *)"otstr",(char *)"instr",(char *)"cotstr",(char *)"cinstr",(char *)"powmod2", (char *)"prepare_monty",(char *)"nres",(char *)"redc",(char *)"nres_modmult",(char *)"nres_powmod", (char *)"nres_moddiv",(char *)"nres_powltr",(char *)"divisible",(char *)"remain", (char *)"fmodulo",(char *)"nres_modadd",(char *)"nres_modsub",(char *)"nres_negate", (char *)"ecurve_init",(char *)"ecurve_add",(char *)"ecurve_mult", (char *)"epoint_init",(char *)"epoint_set",(char *)"epoint_get",(char *)"nres_powmod2", (char *)"nres_sqroot",(char *)"sqroot",(char *)"nres_premult",(char *)"ecurve_mult2", (char *)"ecurve_sub",(char *)"trial_division",(char *)"nxsafeprime",(char *)"nres_lucas",(char *)"lucas", (char *)"brick_init",(char *)"pow_brick",(char *)"set_user_function", (char *)"nres_powmodn",(char *)"powmodn",(char *)"ecurve_multn", (char *)"ebrick_init",(char *)"mul_brick",(char *)"epoint_norm",(char *)"nres_multi_inverse",(char *)"", (char *)"nres_dotprod",(char *)"epoint_negate",(char *)"ecurve_multi_add", (char *)"ecurve2_init",(char *)"",(char *)"epoint2_set",(char *)"epoint2_norm",(char *)"epoint2_get", (char *)"epoint2_comp",(char *)"ecurve2_add",(char *)"epoint2_negate",(char *)"ecurve2_sub", (char *)"ecurve2_multi_add",(char *)"ecurve2_mult",(char *)"ecurve2_multn",(char *)"ecurve2_mult2", (char *)"ebrick2_init",(char *)"mul2_brick",(char *)"prepare_basis",(char *)"strong_bigrand", (char *)"bytes_to_big",(char *)"big_to_bytes",(char *)"set_io_buffer_size", (char *)"epoint_getxyz",(char *)"epoint_double_add",(char *)"nres_double_inverse", (char *)"double_inverse",(char *)"epoint_x",(char *)"hamming",(char *)"expb2",(char *)"bigbits", (char *)"nres_lazy",(char *)"zzn2_imul",(char *)"nres_double_modadd",(char *)"nres_double_modsub", /*155*/(char *)"",(char 
*)"zzn2_from_int",(char *)"zzn2_negate",(char *)"zzn2_conj",(char *)"zzn2_add", (char *)"zzn2_sub",(char *)"zzn2_smul",(char *)"zzn2_mul",(char *)"zzn2_inv",(char *)"zzn2_timesi",(char *)"zzn2_powl", (char *)"zzn2_from_bigs",(char *)"zzn2_from_big",(char *)"zzn2_from_ints", (char *)"zzn2_sadd",(char *)"zzn2_ssub",(char *)"zzn2_times_irp",(char *)"zzn2_div2", (char *)"zzn3_from_int",(char *)"zzn3_from_ints",(char *)"zzn3_from_bigs", (char *)"zzn3_from_big",(char *)"zzn3_negate",(char *)"zzn3_powq",(char *)"zzn3_init", (char *)"zzn3_add",(char *)"zzn3_sadd",(char *)"zzn3_sub",(char *)"zzn3_ssub",(char *)"zzn3_smul", (char *)"zzn3_imul",(char *)"zzn3_mul",(char *)"zzn3_inv",(char *)"zzn3_div2",(char *)"zzn3_timesi", (char *)"epoint_multi_norm",(char *)"mr_jsf",(char *)"epoint2_multi_norm", (char *)"ecn2_compare",(char *)"ecn2_norm",(char *)"ecn2_set",(char *)"zzn2_txx", (char *)"zzn2_txd",(char *)"nres_div2",(char *)"nres_div3",(char *)"zzn2_div3", (char *)"ecn2_setx",(char *)"ecn2_rhs",(char *)"zzn2_qr",(char *)"zzn2_sqrt",(char *)"ecn2_add",(char *)"ecn2_mul2_jsf",(char *)"ecn2_mul", (char *)"nres_div5",(char *)"zzn2_div5",(char *)"zzn2_sqr",(char *)"ecn2_add_sub",(char *)"ecn2_psi",(char *)"invmodp", (char *)"zzn2_multi_inverse",(char *)"ecn2_multi_norm",(char *)"ecn2_precomp",(char *)"ecn2_mul4_gls_v", (char *)"ecn2_mul2",(char *)"ecn2_precomp_gls",(char *)"ecn2_mul2_gls", (char *)"ecn2_brick_init",(char *)"ecn2_mul_brick_gls",(char *)"ecn2_multn",(char *)"zzn3_timesi2", (char *)"nres_complex",(char *)"zzn4_from_int",(char *)"zzn4_negate",(char *)"zzn4_conj",(char *)"zzn4_add",(char *)"zzn4_sadd",(char *)"zzn4_sub",(char *)"zzn4_ssub",(char *)"zzn4_smul",(char *)"zzn4_sqr", (char *)"zzn4_mul",(char *)"zzn4_inv",(char *)"zzn4_div2",(char *)"zzn4_powq",(char *)"zzn4_tx",(char *)"zzn4_imul",(char *)"zzn4_lmul",(char *)"zzn4_from_big", (char *)"ecn2_mult4"}; /* 0 - 243 (244 in all) */ #endif #endif #ifdef MR_NOASM /* C only versions of muldiv/muldvd/muldvd2/muldvm */ /* Note that mr_large should be twice the size of mr_small */ mr_small muldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)(MR_LROUND(p/m)); *rp=(mr_small)(p-(mr_large)q*m); return q; } #ifdef MR_FP_ROUNDING mr_small imuldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_large im,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)MR_LROUND(p*im); *rp=(mr_small)(p-(mr_large)q*m); return q; } #endif #ifndef MR_NOFULLWIDTH mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { mr_small q; union doubleword dble; dble.h[MR_BOT]=c; dble.h[MR_TOP]=a; q=(mr_small)(dble.d/m); *rp=(mr_small)(dble.d-(mr_large)q*m); return q; } mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+c; *rp=dble.h[MR_BOT]; return dble.h[MR_TOP]; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+*c+*rp; *rp=dble.h[MR_BOT]; *c=dble.h[MR_TOP]; } #endif #endif #ifdef MR_NOFULLWIDTH /* no FULLWIDTH working, so supply dummies */ /* mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { return (mr_small)0; } mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { return (mr_small)0; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { } */ #endif #ifndef MR_NO_STANDARD_IO static void mputs(char *s) { /* output a string */ int i=0; while (s[i]!=0) fputc((int)s[i++],stdout); } #endif void mr_berror(_MIPD_ int nerr) { /* Big 
number error routine */ #ifndef MR_STRIPPED_DOWN int i; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERCON) { mr_mip->ERNUM=nerr; return; } #ifndef MR_NO_STANDARD_IO #ifndef MR_STRIPPED_DOWN mputs((char *)"\nMIRACL error from routine "); if (mr_mip->depth<MR_MAXDEPTH) mputs(names[mr_mip->trace[mr_mip->depth]]); else mputs((char *)"???"); fputc('\n',stdout); for (i=mr_mip->depth-1;i>=0;i--) { mputs((char *)" called from "); if (i<MR_MAXDEPTH) mputs(names[mr_mip->trace[i]]); else mputs((char *)"???"); fputc('\n',stdout); } switch (nerr) { case 1 : mputs((char *)"Number base too big for representation\n"); break; case 2 : mputs((char *)"Division by zero attempted\n"); break; case 3 : mputs((char *)"Overflow - Number too big\n"); break; case 4 : mputs((char *)"Internal result is negative\n"); break; case 5 : mputs((char *)"Input format error\n"); break; case 6 : mputs((char *)"Illegal number base\n"); break; case 7 : mputs((char *)"Illegal parameter usage\n"); break; case 8 : mputs((char *)"Out of space\n"); break; case 9 : mputs((char *)"Even root of a negative number\n"); break; case 10: mputs((char *)"Raising integer to negative power\n"); break; case 11: mputs((char *)"Attempt to take illegal root\n"); break; case 12: mputs((char *)"Integer operation attempted on Flash number\n"); break; case 13: mputs((char *)"Flash overflow\n"); break; case 14: mputs((char *)"Numbers too big\n"); break; case 15: mputs((char *)"Log of a non-positive number\n"); break; case 16: mputs((char *)"Flash to double conversion failure\n"); break; case 17: mputs((char *)"I/O buffer overflow\n"); break; case 18: mputs((char *)"MIRACL not initialised - no call to mirsys()\n"); break; case 19: mputs((char *)"Illegal modulus \n"); break; case 20: mputs((char *)"No modulus defined\n"); break; case 21: mputs((char *)"Exponent too big\n"); break; case 22: mputs((char *)"Unsupported Feature - check mirdef.h\n"); break; case 23: mputs((char *)"Specified double length type isn't double length\n"); break; case 24: mputs((char *)"Specified basis is NOT irreducible\n"); break; case 25: mputs((char *)"Unable to control Floating-point rounding\n"); break; case 26: mputs((char *)"Base must be binary (MR_ALWAYS_BINARY defined in mirdef.h ?)\n"); break; case 27: mputs((char *)"No irreducible basis defined\n"); break; case 28: mputs((char *)"Composite modulus\n"); break; case 29: mputs((char *)"Input/output error when reading from RNG device node\n"); break; default: mputs((char *)"Undefined error\n"); break; } exit(0); #else mputs((char *)"MIRACL error\n"); exit(0); #endif #endif } #ifndef MR_STRIPPED_DOWN void mr_track(_MIPDO_ ) { /* track course of program execution * * through the MIRACL routines */ #ifndef MR_NO_STANDARD_IO int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif for (i=0;i<mr_mip->depth;i++) fputc('-',stdout); fputc('>',stdout); mputs(names[mr_mip->trace[mr_mip->depth]]); fputc('\n',stdout); #endif } #endif #ifndef MR_NO_RAND mr_small brand(_MIPDO_ ) { /* Marsaglia & Zaman random number generator */ int i,k; mr_unsign32 pdiff,t; mr_small r; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->lg2b>32) { /* underlying type is > 32 bits. 
Assume <= 64 bits */ mr_mip->rndptr+=2; if (mr_mip->rndptr<NK-1) { r=(mr_small)mr_mip->ira[mr_mip->rndptr]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[mr_mip->rndptr+1]; return r; } } else { mr_mip->rndptr++; if (mr_mip->rndptr<NK) return (mr_small)mr_mip->ira[mr_mip->rndptr]; } mr_mip->rndptr=0; for (i=0,k=NK-NJ;i<NK;i++,k++) { /* calculate next NK values */ if (k==NK) k=0; t=mr_mip->ira[k]; pdiff=t - mr_mip->ira[i] - mr_mip->borrow; if (pdiff<t) mr_mip->borrow=0; if (pdiff>t) mr_mip->borrow=1; mr_mip->ira[i]=pdiff; } if (mr_mip->lg2b>32) { /* double up */ r=(mr_small)mr_mip->ira[0]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[1]; return r; } else return (mr_small)(mr_mip->ira[0]); } void irand(_MIPD_ mr_unsign32 seed) { /* initialise random number system */ int i,in; mr_unsign32 t,m=1L; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif mr_mip->borrow=0L; mr_mip->rndptr=0; mr_mip->ira[0]=seed; for (i=1;i<NK;i++) { /* fill initialisation vector */ in=(NV*i)%NK; mr_mip->ira[in]=m; t=m; m=seed-m; seed=t; } for (i=0;i<1000;i++) brand(_MIPPO_ ); /* "warm-up" & stir the generator */ } #endif mr_small mr_shiftbits(mr_small x,int n) { #ifdef MR_FP int i; mr_small dres; if (n==0) return x; if (n>0) { for (i=0;i<n;i++) x=x+x; return x; } n=-n; for (i=0;i<n;i++) x=MR_DIV(x,2.0); return x; #else if (n==0) return x; if (n>0) x<<=n; else x>>=(-n); return x; #endif } mr_small mr_setbase(_MIPD_ mr_small nb) { /* set base. Pack as many digits as * * possible into each computer word */ mr_small temp; #ifdef MR_FP mr_small dres; #endif #ifndef MR_NOFULLWIDTH BOOL fits; int bits; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif fits=FALSE; bits=MIRACL; while (bits>1) { bits/=2; temp=((mr_small)1<<bits); if (temp==nb) { fits=TRUE; break; } if (temp<nb || (bits%2)!=0) break; } if (fits) { mr_mip->apbase=nb; mr_mip->pack=MIRACL/bits; mr_mip->base=0; return 0; } #endif mr_mip->apbase=nb; mr_mip->pack=1; mr_mip->base=nb; #ifdef MR_SIMPLE_BASE return 0; #else if (mr_mip->base==0) return 0; temp=MR_DIV(MAXBASE,nb); while (temp>=nb) { temp=MR_DIV(temp,nb); mr_mip->base*=nb; mr_mip->pack++; } #ifdef MR_FP_ROUNDING mr_mip->inverse_base=mr_invert(mr_mip->base); return mr_mip->inverse_base; #else return 0; #endif #endif } #ifdef MR_FLASH BOOL fit(big x,big y,int f) { /* returns TRUE if x/y would fit flash format of length f */ int n,d; n=(int)(x->len&(MR_OBITS)); d=(int)(y->len&(MR_OBITS)); if (n==1 && x->w[0]==1) n=0; if (d==1 && y->w[0]==1) d=0; if (n+d<=f) return TRUE; return FALSE; } #endif int mr_lent(flash x) { /* return length of big or flash in words */ mr_lentype lx; lx=(x->len&(MR_OBITS)); #ifdef MR_FLASH return (int)((lx&(MR_MSK))+((lx>>(MR_BTS))&(MR_MSK))); #else return (int)lx; #endif } void zero(flash x) { /* set big/flash number to zero */ int i,n; mr_small *g; if (x==NULL) return; #ifdef MR_FLASH n=mr_lent(x); #else n=(x->len&MR_OBITS); #endif g=x->w; for (i=0;i<n;i++) g[i]=0; x->len=0; } void uconvert(_MIPD_ unsigned int n ,big x) { /* convert unsigned integer n to big number format */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_IBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%((mr_small)1<<(MIRACL))); n/=((mr_small)1<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=MR_REMAIN((mr_small)n,mr_mip->base); n=(unsigned 
int)((mr_small)n/mr_mip->base); } #endif x->len=m; } void tconvert(_MIPD_ mr_utype n,big x) { mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } x->w[0]=n; x->len=1; x->len|=s; } void convert(_MIPD_ int n ,big x) { /* convert signed integer n to big number format */ mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } uconvert(_MIPP_ (unsigned int)n,x); x->len|=s; } #ifndef MR_STATIC #ifdef mr_dltype void dlconv(_MIPD_ mr_dltype n,big x) { /* convert double length integer to big number format - rarely needed */ int m; mr_lentype s; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; s=0; if (n<0) { s=MR_MSBIT; n=(-n); } m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH while (n>0) { x->w[m++]=(mr_small)(n%((mr_dltype)1<<(MIRACL))); n/=((mr_dltype)1<<(MIRACL)); } #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=(mr_small)MR_REMAIN(n,mr_mip->base); n/=mr_mip->base; } #endif x->len=(m|s); } #endif void ulgconv(_MIPD_ unsigned long n,big x) { /* convert unsigned long integer to big number format - rarely needed */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_LBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%(1L<<(MIRACL))); n/=(1L<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=MR_REMAIN(n,mr_mip->base); n=(unsigned long)((mr_small)n/mr_mip->base); } #endif x->len=m; } void lgconv(_MIPD_ long n,big x) { /* convert signed long integer to big number format - rarely needed */ mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } ulgconv(_MIPP_ (unsigned long)n,x); x->len|=s; } flash mirvar(_MIPD_ int iv) { /* initialize big/flash number */ flash x; int align; char *ptr; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; MR_IN(23); if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return NULL; } /* OK, now I control alignment.... 
*/ /* Allocate space for big, the length, the pointer, and the array */ /* Do it all in one memory allocation - this is quicker */ /* Ensure that the array has correct alignment */ x=(big)mr_alloc(_MIPP_ mr_size(mr_mip->nib-1),1); if (x==NULL) { MR_OUT return x; } ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); if (iv!=0) convert(_MIPP_ iv,x); MR_OUT return x; } #endif flash mirvar_mem_variable(char *mem,int index,int sz) { flash x; int align; char *ptr; int offset,r; /* alignment */ offset=0; r=(unsigned long)mem%MR_SL; if (r>0) offset=MR_SL-r; x=(big)&mem[offset+mr_size(sz)*index]; ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); return x; } flash mirvar_mem(_MIPD_ char *mem,int index) { /* initialize big/flash number from pre-allocated memory */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; return mirvar_mem_variable(mem,index,mr_mip->nib-1); } void set_user_function(_MIPD_ BOOL (*user)(void)) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(111) if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return; } mr_mip->user=user; MR_OUT } #ifndef MR_STATIC #ifndef MR_SIMPLE_IO void set_io_buffer_size(_MIPD_ int len) { int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (len<0) return; MR_IN(142) for (i=0;i<mr_mip->IOBSIZ;i++) mr_mip->IOBUFF[i]=0; mr_free(mr_mip->IOBUFF); if (len==0) { MR_OUT return; } mr_mip->IOBSIZ=len; mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ len+1,1); mr_mip->IOBUFF[0]='\0'; MR_OUT } #endif #endif /* Initialise a big from ROM given its fixed length */ BOOL init_big_from_rom(big x,int len,const mr_small *rom,int romsize,int *romptr) { int i; zero(x); x->len=len; for (i=0;i<len;i++) { if (*romptr>=romsize) return FALSE; #ifdef MR_AVR x->w[i]=pgm_read_byte_near(&rom[*romptr]); #else x->w[i]=rom[*romptr]; #endif (*romptr)++; } mr_lzero(x); return TRUE; } /* Initialise an elliptic curve point from ROM */ BOOL init_point_from_rom(epoint *P,int len,const mr_small *rom,int romsize,int *romptr) { if (!init_big_from_rom(P->X,len,rom,romsize,romptr)) return FALSE; if (!init_big_from_rom(P->Y,len,rom,romsize,romptr)) return FALSE; P->marker=MR_EPOINT_NORMALIZED; return TRUE; } #ifdef MR_GENERIC_AND_STATIC miracl *mirsys(miracl *mr_mip,int nd,mr_small nb) #else miracl *mirsys(int nd,mr_small nb) #endif { /* Initialize MIRACL system to * * use numbers to base nb, and * * nd digits or (-nd) bytes long */ /* In these cases mr_mip is passed as the first parameter */ #ifdef MR_GENERIC_AND_STATIC return mirsys_basic(mr_mip,nd,nb); #endif #ifdef MR_GENERIC_MT #ifndef MR_STATIC miracl *mr_mip=mr_first_alloc(); return mirsys_basic(mr_mip,nd,nb); #endif #endif /* In these cases mr_mip is a "global" pointer and the mip itself is allocated from the heap. 
In fact mr_mip (and mip) may be thread specific if some multi-threading scheme is implemented */ #ifndef MR_STATIC #ifdef MR_WINDOWS_MT miracl *mr_mip=mr_first_alloc(); TlsSetValue(mr_key,mr_mip); #endif #ifdef MR_UNIX_MT miracl *mr_mip=mr_first_alloc(); pthread_setspecific(mr_key,mr_mip); #endif #ifdef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #endif #endif #endif #ifndef MR_GENERIC_MT mr_mip=get_mip(); #endif return mirsys_basic(mr_mip,nd,nb); } miracl *mirsys_basic(miracl *mr_mip,int nd,mr_small nb) { #ifndef MR_NO_RAND int i; #endif mr_small b,nw; #ifdef MR_FP mr_small dres; #endif if (mr_mip==NULL) return NULL; #ifndef MR_STRIPPED_DOWN mr_mip->depth=0; mr_mip->trace[0]=0; mr_mip->depth++; mr_mip->trace[mr_mip->depth]=29; #endif /* digest hardware configuration */ #ifdef MR_NO_STANDARD_IO mr_mip->ERCON=TRUE; #else mr_mip->ERCON=FALSE; #endif #ifndef MR_STATIC mr_mip->logN=0; mr_mip->degree=0; mr_mip->chin.NP=0; #endif mr_mip->user=NULL; mr_mip->same=FALSE; mr_mip->first_one=FALSE; mr_mip->debug=FALSE; mr_mip->AA=0; #ifndef MR_AFFINE_ONLY mr_mip->coord=MR_NOTSET; #endif #ifdef MR_NOFULLWIDTH if (nb==0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif #ifndef MR_FP #ifdef mr_dltype #ifndef MR_NOFULLWIDTH if (sizeof(mr_dltype)<2*sizeof(mr_utype)) { /* double length type, isn't */ mr_berror(_MIPP_ MR_ERR_NOT_DOUBLE_LEN); MR_OUT return mr_mip; } #endif #endif #endif if (nb==1 || nb>MAXBASE) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #ifdef MR_FP_ROUNDING if (mr_setbase(_MIPP_ nb)==0) { /* unable in fact to control FP rounding */ mr_berror(_MIPP_ MR_ERR_NO_ROUNDING); MR_OUT return mr_mip; } #else mr_setbase(_MIPP_ nb); #endif b=mr_mip->base; #ifdef MR_SIMPLE_BASE if (b!=0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif mr_mip->lg2b=0; mr_mip->base2=1; #ifndef MR_SIMPLE_BASE if (b==0) { #endif mr_mip->lg2b=MIRACL; mr_mip->base2=0; #ifndef MR_SIMPLE_BASE } else while (b>1) { b=MR_DIV(b,2); mr_mip->lg2b++; mr_mip->base2*=2; } #endif #ifdef MR_ALWAYS_BINARY if (mr_mip->base!=mr_mip->base2) { mr_berror(_MIPP_ MR_ERR_NOT_BINARY); MR_OUT return mr_mip; } #endif /* calculate total space for bigs */ /* big -> |int len|small *ptr| alignment space | size in words +1| alignment up to multiple of 4 | */ if (nd>0) nw=MR_ROUNDUP(nd,mr_mip->pack); else nw=MR_ROUNDUP(8*(-nd),mr_mip->lg2b); if (nw<1) nw=1; mr_mip->nib=(int)(nw+1); /* add one extra word for small overflows */ #ifdef MR_STATIC if (nw>MR_STATIC) { mr_berror(_MIPP_ MR_ERR_TOO_BIG); MR_OUT return mr_mip; } #endif /* mr_mip->nib=(int)(nw+1); add one extra word for small overflows */ #ifdef MR_FLASH mr_mip->workprec=mr_mip->nib; mr_mip->stprec=mr_mip->nib; while (mr_mip->stprec>2 && mr_mip->stprec>MR_FLASH/mr_mip->lg2b) mr_mip->stprec=(mr_mip->stprec+1)/2; if (mr_mip->stprec<2) mr_mip->stprec=2; #endif #ifndef MR_DOUBLE_BIG mr_mip->check=ON; #else mr_mip->check=OFF; #endif #ifndef MR_SIMPLE_BASE #ifndef MR_SIMPLE_IO mr_mip->IOBASE=10; /* defaults */ #endif #endif mr_mip->ERNUM=0; mr_mip->NTRY=6; mr_mip->MONTY=ON; #ifdef MR_FLASH mr_mip->EXACT=TRUE; mr_mip->RPOINT=OFF; #endif #ifndef MR_STRIPPED_DOWN mr_mip->TRACER=OFF; #endif #ifndef MR_SIMPLE_IO mr_mip->INPLEN=0; mr_mip->IOBSIZ=MR_DEFAULT_BUFFER_SIZE; #endif #ifdef MR_STATIC mr_mip->PRIMES=mr_small_primes; #else mr_mip->PRIMES=NULL; #ifndef MR_SIMPLE_IO mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ MR_DEFAULT_BUFFER_SIZE+1,1); #endif #endif #ifndef 
MR_SIMPLE_IO mr_mip->IOBUFF[0]='\0'; #endif mr_mip->qnr=0; mr_mip->cnr=0; mr_mip->TWIST=0; mr_mip->pmod8=0; mr_mip->pmod9=0; /* quick start for rng. irand(.) should be called first before serious use.. */ #ifndef MR_NO_RAND mr_mip->ira[0]=0x55555555; mr_mip->ira[1]=0x12345678; for (i=2;i<NK;i++) mr_mip->ira[i]=mr_mip->ira[i-1]+mr_mip->ira[i-2]+0x1379BDF1; mr_mip->rndptr=NK; mr_mip->borrow=0; #endif mr_mip->nib=2*mr_mip->nib+1; #ifdef MR_FLASH if (mr_mip->nib!=(mr_mip->nib&(MR_MSK))) #else if (mr_mip->nib!=(int)(mr_mip->nib&(MR_OBITS))) #endif { mr_berror(_MIPP_ MR_ERR_TOO_BIG); mr_mip->nib=(mr_mip->nib-1)/2; MR_OUT return mr_mip; } #ifndef MR_STATIC mr_mip->workspace=(char *)memalloc(_MIPP_ MR_SPACES); /* grab workspace */ #else memset(mr_mip->workspace,0,MR_BIG_RESERVE(MR_SPACES)); #endif mr_mip->M=0; mr_mip->fin=FALSE; mr_mip->fout=FALSE; mr_mip->active=ON; mr_mip->nib=(mr_mip->nib-1)/2; /* allocate memory for workspace variables */ #ifndef MR_DOUBLE_BIG mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* double length */ mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,2); mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,3); mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,4); mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,5); mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,6); /* double length */ mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,8); /* double length */ mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,10); /* double length */ mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,12); mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,13); mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,14); mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,15); mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,16); mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,17); mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,18); mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,19); mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,20); mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,21); mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,22); /* double length */ mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,24); mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,25); mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,26); #ifdef MR_KCM mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,27); mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,28); mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,29); /* double length */ #endif #ifdef MR_FLASH #ifdef MR_KCM mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,31); #else mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,27); #endif #endif #else /* w0-w7 are double normal length */ mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* quad length */ mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,4); /* double length */ mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,6); mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,8); mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,10); mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,12); /* quad length */ mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,16); /* quad length */ mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,20); /* quad length */ mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,24); mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,25); mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,26); mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,27); mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,28); mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,29); mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,30); mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,31); 
mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,32); mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,33); mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,34); /* double length */ mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,36); mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,37); mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,38); #ifdef MR_KCM mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,39); mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,40); mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,41); /* double length */ #endif #ifdef MR_FLASH #ifdef MR_KCM mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,43); #else mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,39); #endif #endif #endif MR_OUT return mr_mip; } #ifndef MR_STATIC /* allocate space for a number of bigs from the heap */ void *memalloc(_MIPD_ int num) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif return mr_alloc(_MIPP_ mr_big_reserve(num,mr_mip->nib-1),1); } #endif void memkill(_MIPD_ char *mem,int len) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mem==NULL) return; memset(mem,0,mr_big_reserve(len,mr_mip->nib-1)); #ifndef MR_STATIC mr_free(mem); #endif } #ifndef MR_STATIC void mirkill(big x) { /* kill a big/flash variable, that is set it to zero and free its memory */ if (x==NULL) return; zero(x); mr_free(x); } #endif void mirexit(_MIPDO_ ) { /* clean up after miracl */ int i; #ifdef MR_WINDOWS_MT miracl *mr_mip=get_mip(); #endif #ifdef MR_UNIX_MT miracl *mr_mip=get_mip(); #endif #ifdef MR_OPENMP_MT miracl *mr_mip=get_mip(); #endif mr_mip->ERCON=FALSE; mr_mip->active=OFF; memkill(_MIPP_ mr_mip->workspace,MR_SPACES); #ifndef MR_NO_RAND for (i=0;i<NK;i++) mr_mip->ira[i]=0L; #endif #ifndef MR_STATIC #ifndef MR_SIMPLE_IO set_io_buffer_size(_MIPP_ 0); #endif if (mr_mip->PRIMES!=NULL) mr_free(mr_mip->PRIMES); #else #ifndef MR_SIMPLE_IO for (i=0;i<=MR_DEFAULT_BUFFER_SIZE;i++) mr_mip->IOBUFF[i]=0; #endif #endif #ifndef MR_STATIC mr_free(mr_mip); #ifdef MR_WINDOWS_MT TlsSetValue(mr_key, NULL); /* Thank you Thales */ #endif #endif #ifndef MR_GENERIC_MT #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_STATIC mr_mip=NULL; #endif #endif #endif #endif #ifdef MR_OPENMP_MT mr_mip=NULL; #endif } int exsign(flash x) { /* extract sign of big/flash number */ if ((x->len&(MR_MSBIT))==0) return PLUS; else return MINUS; } void insign(int s,flash x) { /* assert sign of big/flash number */ if (x->len==0) return; if (s<0) x->len|=MR_MSBIT; else x->len&=MR_OBITS; } void mr_lzero(big x) { /* strip leading zeros from big number */ mr_lentype s; int m; s=(x->len&(MR_MSBIT)); m=(int)(x->len&(MR_OBITS)); while (m>0 && x->w[m-1]==0) m--; x->len=m; if (m>0) x->len|=s; } #ifndef MR_SIMPLE_IO int getdig(_MIPD_ big x,int i) { /* extract a packed digit */ int k; mr_small n; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif i--; n=x->w[i/mr_mip->pack]; if (mr_mip->pack==1) return (int)n; k=i%mr_mip->pack; for (i=1;i<=k;i++) n=MR_DIV(n,mr_mip->apbase); return (int)MR_REMAIN(n,mr_mip->apbase); } int numdig(_MIPD_ big x) { /* returns number of digits in x */ int nd; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (x->len==0) return 0; nd=(int)(x->len&(MR_OBITS))*mr_mip->pack; while (getdig(_MIPP_ x,nd)==0) nd--; return nd; } void putdig(_MIPD_ int n,big x,int i) { /* insert a digit into a packed word */ int j,k,lx; mr_small m,p; mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(26) s=(x->len&(MR_MSBIT)); lx=(int)(x->len&(MR_OBITS)); 
m=getdig(_MIPP_ x,i); p=n; i--; j=i/mr_mip->pack; k=i%mr_mip->pack; for (i=1;i<=k;i++) { m*=mr_mip->apbase; p*=mr_mip->apbase; } if (j>=mr_mip->nib && (mr_mip->check || j>=2*mr_mip->nib)) { mr_berror(_MIPP_ MR_ERR_OVERFLOW); MR_OUT return; } x->w[j]=(x->w[j]-m)+p; if (j>=lx) x->len=((j+1)|s); mr_lzero(x); MR_OUT } #endif #ifndef MR_FP void mr_and(big x,big y,big z) { /* z= bitwise logical AND of x and y */ int i,nx,ny,nz,nr; if (x==y) { copy(x,z); return; } #ifdef MR_FLASH nx=mr_lent(x); ny=mr_lent(y); nz=mr_lent(z); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); nz=(z->len&(MR_OBITS)); #endif if (ny<nx) nr=ny; else nr=nx; for (i=0;i<nr;i++) z->w[i]=x->w[i]&y->w[i]; for (i=nr;i<nz;i++) z->w[i]=0; z->len=nr; } void mr_xor(big x,big y,big z) { int i,nx,ny,nz,nr; if (x==y) { copy(x,z); return; } #ifdef MR_FLASH nx=mr_lent(x); ny=mr_lent(y); nz=mr_lent(z); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); nz=(z->len&(MR_OBITS)); #endif if (ny<nx) nr=ny; else nr=nx; for (i=0;i<nr;i++) z->w[i]=x->w[i]^y->w[i]; for (i=nr;i<nz;i++) z->w[i]=0; z->len=nr; } #endif void copy(flash x,flash y) { /* copy x to y: y=x */ int i,nx,ny; mr_small *gx,*gy; if (x==y || y==NULL) return; if (x==NULL) { zero(y); return; } #ifdef MR_FLASH ny=mr_lent(y); nx=mr_lent(x); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); #endif gx=x->w; gy=y->w; for (i=nx;i<ny;i++) gy[i]=0; for (i=0;i<nx;i++) gy[i]=gx[i]; y->len=x->len; } void negify(flash x,flash y) { /* negate a big/flash variable: y=-x */ copy(x,y); if (y->len!=0) y->len^=MR_MSBIT; } void absol(flash x,flash y) { /* y=abs(x) */ copy(x,y); y->len&=MR_OBITS; } BOOL mr_notint(flash x) { /* returns TRUE if x is Flash */ #ifdef MR_FLASH if ((((x->len&(MR_OBITS))>>(MR_BTS))&(MR_MSK))!=0) return TRUE; #endif return FALSE; } void mr_shift(_MIPD_ big x,int n,big w) { /* set w=x.(mr_base^n) by shifting */ mr_lentype s; int i,bl; mr_small *gw=w->w; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; copy(x,w); if (w->len==0 || n==0) return; MR_IN(33) if (mr_notint(w)) mr_berror(_MIPP_ MR_ERR_INT_OP); s=(w->len&(MR_MSBIT)); bl=(int)(w->len&(MR_OBITS))+n; if (bl<=0) { zero(w); MR_OUT return; } if (bl>mr_mip->nib && mr_mip->check) mr_berror(_MIPP_ MR_ERR_OVERFLOW); if (mr_mip->ERNUM) { MR_OUT return; } if (n>0) { for (i=bl-1;i>=n;i--) gw[i]=gw[i-n]; for (i=0;i<n;i++) gw[i]=0; } else { n=(-n); for (i=0;i<bl;i++) gw[i]=gw[i+n]; for (i=0;i<n;i++) gw[bl+i]=0; } w->len=(bl|s); MR_OUT } int size(big x) { /* get size of big number; convert to * * integer - if possible */ int n,m; mr_lentype s; if (x==NULL) return 0; s=(x->len&MR_MSBIT); m=(int)(x->len&MR_OBITS); if (m==0) return 0; if (m==1 && x->w[0]<(mr_small)MR_TOOBIG) n=(int)x->w[0]; else n=MR_TOOBIG; if (s==MR_MSBIT) return (-n); return n; } int mr_compare(big x,big y) { /* compare x and y: =1 if x>y =-1 if x<y * * =0 if x=y */ int m,n,sig; mr_lentype sx,sy; if (x==y) return 0; sx=(x->len&MR_MSBIT); sy=(y->len&MR_MSBIT); if (sx==0) sig=PLUS; else sig=MINUS; if (sx!=sy) return sig; m=(int)(x->len&MR_OBITS); n=(int)(y->len&MR_OBITS); if (m>n) return sig; if (m<n) return -sig; while (m>0) { /* check digit by digit */ m--; if (x->w[m]>y->w[m]) return sig; if (x->w[m]<y->w[m]) return -sig; } return 0; } #ifdef MR_FLASH void fpack(_MIPD_ big n,big d,flash x) { /* create floating-slash number x=n/d from * * big integer numerator and denominator */ mr_lentype s; int i,ld,ln; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(31) ld=(int)(d->len&MR_OBITS); if 
(ld==0) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
    if (ld==1 && d->w[0]==1) ld=0;
    if (x==d) mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
    if (mr_notint(n) || mr_notint(d)) mr_berror(_MIPP_ MR_ERR_INT_OP);
    s=(n->len&MR_MSBIT);
    ln=(int)(n->len&MR_OBITS);
    if (ln==1 && n->w[0]==1) ln=0;
    if ((ld+ln>mr_mip->nib) && (mr_mip->check || ld+ln>2*mr_mip->nib))
        mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
    if (mr_mip->ERNUM)
    {
        MR_OUT
        return;
    }
    copy(n,x);
    if (n->len==0)
    {
        MR_OUT
        return;
    }
    s^=(d->len&MR_MSBIT);
    if (ld==0)
    {
        if (x->len!=0) x->len|=s;
        MR_OUT
        return;
    }
    for (i=0;i<ld;i++) x->w[ln+i]=d->w[i];
    x->len=(s|(ln+((mr_lentype)ld<<MR_BTS)));
    MR_OUT
}

void numer(_MIPD_ flash x,big y)
{ /* extract numerator of x */
    int i,ln,ld;
    mr_lentype s,ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (mr_notint(x))
    {
        s=(x->len&MR_MSBIT);
        ly=(x->len&MR_OBITS);
        ln=(int)(ly&MR_MSK);
        if (ln==0)
        {
            if (s==MR_MSBIT) convert(_MIPP_ (-1),y);
            else             convert(_MIPP_ 1,y);
            return;
        }
        ld=(int)((ly>>MR_BTS)&MR_MSK);
        if (x!=y)
        {
            for (i=0;i<ln;i++) y->w[i]=x->w[i];
            for (i=ln;i<mr_lent(y);i++) y->w[i]=0;
        }
        else for (i=0;i<ld;i++) y->w[ln+i]=0;
        y->len=(ln|s);
    }
    else copy(x,y);
}

void denom(_MIPD_ flash x,big y)
{ /* extract denominator of x */
    int i,ln,ld;
    mr_lentype ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (!mr_notint(x))
    {
        convert(_MIPP_ 1,y);
        return;
    }
    ly=(x->len&MR_OBITS);
    ln=(int)(ly&MR_MSK);
    ld=(int)((ly>>MR_BTS)&MR_MSK);
    for (i=0;i<ld;i++) y->w[i]=x->w[ln+i];
    if (x==y) for (i=0;i<ln;i++) y->w[ld+i]=0;
    else      for (i=ld;i<mr_lent(y);i++) y->w[i]=0;
    y->len=ld;
}

#endif

unsigned int igcd(unsigned int x,unsigned int y)
{ /* integer GCD, returns GCD of x and y */
    unsigned int r;
    if (y==0) return x;
    while ((r=x%y)!=0)
        x=y,y=r;
    return y;
}

unsigned long lgcd(unsigned long x,unsigned long y)
{ /* long GCD, returns GCD of x and y */
    unsigned long r;
    if (y==0) return x;
    while ((r=x%y)!=0)
        x=y,y=r;
    return y;
}

unsigned int isqrt(unsigned int num,unsigned int guess)
{ /* square root of an integer */
    unsigned int sqr;
    unsigned int oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newton's iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        {
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

unsigned long mr_lsqrt(unsigned long num,unsigned long guess)
{ /* square root of a long */
    unsigned long sqr;
    unsigned long oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newton's iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        {
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

mr_small sgcd(mr_small x,mr_small y)
{ /* integer GCD, returns GCD of x and y */
    mr_small r;
#ifdef MR_FP
    mr_small dres;
#endif
    if (y==(mr_small)0) return x;
    while ((r=MR_REMAIN(x,y))!=(mr_small)0)
        x=y,y=r;
    return y;
}

/* routines to support sliding-windows exponentiation *
 * in various contexts                                */

int mr_testbit(_MIPD_ big x,int n)
{ /* return value of n-th bit of big */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_FP
    mr_small m,a,dres;
    m=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    a=x->w[n/mr_mip->lg2b];
    a=MR_DIV(a,m);
    if ((MR_DIV(a,2.0)*2.0) != a) return 1;
#else
    if ((x->w[n/mr_mip->lg2b] & ((mr_small)1<<(n%mr_mip->lg2b))) >0) return 1;
#endif
    return 0;
}

void mr_addbit(_MIPD_ big x,int n)
{ /* add 2^n to positive x - where you know that bit is zero. Use with care! */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    mr_lentype m=n/mr_mip->lg2b;
    x->w[m]+=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    if (x->len<m+1) x->len=m+1;
}

int recode(_MIPD_ big e,int t,int w,int i)
{ /* recode exponent for Comb method */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    int j,r;
    r=0;
    for (j=w-1;j>=0;j--)
    {
        r<<=1;
        r|=mr_testbit(_MIPP_ e,i+j*t);
    }
    return r;
}

int mr_window(_MIPD_ big x,int i,int *nbs,int *nzs,int window_size)
{ /* returns sliding window value, max. of 5 bits,           *
   * (Note from version 5.23 this can be changed by          *
   * setting parameter window_size. This can be              *
   * a useful space-saver) starting at i-th bit of big x.    *
   * nbs is number of bits processed, nzs is the number of   *
   * additional trailing zeros detected. Returns valid bit   *
   * pattern 1x..x1 with no two adjacent 0's. So 10101       *
   * will return 21 with nbs=5, nzs=0. 11001 will return 3,  *
   * with nbs=2, nzs=2, having stopped after the first 11..  */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    int j,r,w;
    w=window_size;

 /* check for leading 0 bit */
    *nbs=1;
    *nzs=0;
    if (!mr_testbit(_MIPP_ x,i)) return 0;

 /* adjust window size if not enough bits left */
    if (i-w+1<0) w=i+1;

    r=1;
    for (j=i-1;j>i-w;j--)
    { /* accumulate bits. Abort if two 0's in a row */
        (*nbs)++;
        r*=2;
        if (mr_testbit(_MIPP_ x,j)) r+=1;
        if (r%4==0)
        { /* oops - too many zeros - shorten window */
            r/=4;
            *nbs-=2;
            *nzs=2;
            break;
        }
    }
    if (r%2==0)
    { /* remove trailing 0 */
        r/=2;
        *nzs=1;
        (*nbs)--;
    }
    return r;
}

int mr_window2(_MIPD_ big x,big y,int i,int *nbs,int *nzs)
{ /* two bit window for double exponentiation */
    int r,w;
    BOOL a,b,c,d;
    w=2;
    *nbs=1;
    *nzs=0;

 /* check for two leading 0's */
    a=mr_testbit(_MIPP_ x,i);
    b=mr_testbit(_MIPP_ y,i);

    if (!a && !b) return 0;
    if (i<1) w=1;

    if (a)
    {
        if (b) r=3;
        else   r=2;
    }
    else r=1;
    if (w==1) return r;

    c=mr_testbit(_MIPP_ x,i-1);
    d=mr_testbit(_MIPP_ y,i-1);

    if (!c && !d)
    {
        *nzs=1;
        return r;
    }

    *nbs=2;
    r*=4;
    if (c)
    {
        if (d) r+=3;
        else   r+=2;
    }
    else r+=1;
    return r;
}

int mr_naf_window(_MIPD_ big x,big x3,int i,int *nbs,int *nzs,int store)
{ /* returns sliding window value, using fractional windows   *
   * where "store" precomputed values are precalculated and   *
   * stored. Scanning starts at the i-th bit of x. nbs is     *
   * the number of bits processed. nzs is number of           *
   * additional trailing zeros detected. x and x3 (which is   *
   * 3*x) are combined to produce the NAF (non-adjacent       *
   * form). So if x=11011(27) and x3 is 1010001, the LSB is   *
   * ignored and the value 100T0T (32-4-1=27) processed,      *
   * where T is -1. Note x.P = (3x-x)/2.P. This value will    *
   * return +7, with nbs=4 and nzs=1, having stopped after    *
   * the first 4 bits. If it goes too far, it must backtrack. *
   * Note in an NAF non-zero elements are never side by side, *
   * so 10T10T won't happen.                                  *
   * NOTE: return value n is zero or odd, -21 <= n <= +21     */
    int nb,j,r,biggest;

 /* get first bit */
    nb=mr_testbit(_MIPP_ x3,i)-mr_testbit(_MIPP_ x,i);

    *nbs=1;
    *nzs=0;
    if (nb==0) return 0;
    if (i==0) return nb;

    biggest=2*store-1;

    if (nb>0) r=1;
    else      r=(-1);

    for (j=i-1;j>0;j--)
    {
        (*nbs)++;
        r*=2;
        nb=mr_testbit(_MIPP_ x3,j)-mr_testbit(_MIPP_ x,j);
        if (nb>0) r+=1;
        if (nb<0) r-=1;
        if (abs(r)>biggest) break;
    }

    if (r%2!=0 && j!=0)
    { /* backtrack */
        if (nb>0) r=(r-1)/2;
        if (nb<0) r=(r+1)/2;
        (*nbs)--;
    }

    while (r%2==0)
    { /* remove trailing zeros */
        r/=2;
        (*nzs)++;
        (*nbs)--;
    }
    return r;
}

/* Some general purpose elliptic curve stuff */

BOOL point_at_infinity(epoint *p)
{
    if (p==NULL) return FALSE;
    if (p->marker==MR_EPOINT_INFINITY) return TRUE;
    return FALSE;
}

#ifndef MR_STATIC

epoint* epoint_init(_MIPDO_ )
{ /* initialise epoint to general point at infinity. */
    epoint *p;
    char *ptr;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;

    MR_IN(96)

 /* Create space for whole structure in one heap access */

    p=(epoint *)mr_alloc(_MIPP_ mr_esize(mr_mip->nib-1),1);

    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem(_MIPP_ ptr,0);
    p->Y=mirvar_mem(_MIPP_ ptr,1);
#ifndef MR_AFFINE_ONLY
    p->Z=mirvar_mem(_MIPP_ ptr,2);
#endif
    p->marker=MR_EPOINT_INFINITY;

    MR_OUT

    return p;
}

#endif

epoint* epoint_init_mem_variable(_MIPD_ char *mem,int index,int sz)
{
    epoint *p;
    char *ptr;
    int offset,r;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    offset=0;
    r=(unsigned long)mem%MR_SL;
    if (r>0) offset=MR_SL-r;

#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        p=(epoint *)&mem[offset+index*mr_esize_a(sz)];
    else
#endif
        p=(epoint *)&mem[offset+index*mr_esize(sz)];

    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem_variable(ptr,0,sz);
    p->Y=mirvar_mem_variable(ptr,1,sz);
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord!=MR_AFFINE) p->Z=mirvar_mem_variable(ptr,2,sz);
#endif
    p->marker=MR_EPOINT_INFINITY;

    return p;
}

epoint* epoint_init_mem(_MIPD_ char *mem,int index)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    return epoint_init_mem_variable(_MIPP_ mem,index,mr_mip->nib-1);
}

#ifndef MR_STATIC

/* allocate space for a number of epoints from the heap */

void *ecp_memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        return mr_alloc(_MIPP_ mr_ecp_reserve_a(num,mr_mip->nib-1),1);
    else
#endif
        return mr_alloc(_MIPP_ mr_ecp_reserve(num,mr_mip->nib-1),1);
}

#endif

void ecp_memkill(_MIPD_ char *mem,int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem==NULL) return;

#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        memset(mem,0,mr_ecp_reserve_a(num,mr_mip->nib-1));
    else
#endif
        memset(mem,0,mr_ecp_reserve(num,mr_mip->nib-1));

#ifndef MR_STATIC
    mr_free(mem);
#endif
}

#ifndef MR_STATIC

void epoint_free(epoint *p)
{ /* clean up point */
    if (p==NULL) return;
    zero(p->X);
    zero(p->Y);
#ifndef MR_AFFINE_ONLY
    if (p->marker==MR_EPOINT_GENERAL) zero(p->Z);
#endif
    mr_free(p);
}

#endif
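The sliding-window recoding that mr_window() performs is easiest to see on a plain machine word. Below is a minimal standalone sketch (no MIRACL types; testbit() and window() are hypothetical stand-ins for mr_testbit() and mr_window()) that reproduces the two examples from the comment above: 10101 yields 21 with nbs=5, nzs=0, while 11001 stops after the leading 11 with value 3, nbs=2, nzs=2.

#include <cstdio>

// Stand-in for mr_testbit(): value of bit n of x.
static int testbit(unsigned long x, int n) { return (int)((x >> n) & 1UL); }

// Stand-in for mr_window(): scan downward from bit i, stop after two
// adjacent zeros, strip one trailing zero, per the rules documented above.
static int window(unsigned long x, int i, int *nbs, int *nzs, int wsize)
{
    int j, r, w = wsize;
    *nbs = 1; *nzs = 0;
    if (!testbit(x, i)) return 0;        /* leading zero: value 0, one bit */
    if (i - w + 1 < 0) w = i + 1;        /* shrink window near the LSB     */
    r = 1;
    for (j = i - 1; j > i - w; j--)
    {
        (*nbs)++; r *= 2;
        if (testbit(x, j)) r += 1;
        if (r % 4 == 0) { r /= 4; *nbs -= 2; *nzs = 2; break; } /* 00 seen */
    }
    if (r % 2 == 0) { r /= 2; *nzs = 1; (*nbs)--; }  /* strip trailing 0 */
    return r;
}

int main()
{
    int nbs, nzs;
    /* 10101b from bit 4: prints "21 nbs=5 nzs=0" */
    printf("%d nbs=%d nzs=%d\n", window(0x15UL, 4, &nbs, &nzs, 5), nbs, nzs);
    /* 11001b from bit 4 stops after the leading 11: "3 nbs=2 nzs=2" */
    printf("%d nbs=%d nzs=%d\n", window(0x19UL, 4, &nbs, &nzs, 5), nbs, nzs);
    return 0;
}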
pr90954.c
/* PR sanitizer/90954 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fsanitize=undefined" } */

float v;
int i;

void
foo (float x, float y)
{
  #pragma omp atomic
  v += x / y;
}

void
bar (int x, int y)
{
  #pragma omp atomic
  i += x / y;
}

void
baz (int x, int y)
{
  #pragma omp atomic
  i *= (x << y);
}
primitives.h
#pragma once

#include <vector>
#include <cstdint>
#include <omp.h>

#ifndef CACHE_LINE_ENTRY
#define CACHE_LINE_ENTRY (16)
#endif

using namespace std;

/*
 * Both primitives are inclusive.
 * histogram is a cache-line-padded, per-thread histogram (one slot of
 * CACHE_LINE_ENTRY entries per thread, to avoid false sharing);
 * output should be different from the variables captured in function object f;
 * size is the original size for the flagged prefix sum.
 */
template<typename F>
void FlagPrefixSumOMP(vector<uint32_t> &histogram, uint32_t *output, size_t size, F f,
                      int omp_num_threads) {
    static thread_local int tid = omp_get_thread_num();
    // 1st pass: per-thread flag count and local prefix sum.
    auto avg = size / omp_num_threads;
    auto it_beg = avg * tid;
    auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
    histogram[histogram_idx] = 0;
    auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1);
    auto prev = 0u;
    for (auto it = it_beg; it < it_end; it++) {
        if (f(it)) {
            histogram[histogram_idx]++;
            output[it] = prev + 1;
        } else {
            output[it] = prev;
        }
        prev = output[it];
    }
#pragma omp barrier

    // 2nd pass: single-threaded prefix sum over the per-thread totals,
    // then add the preceding threads' sum to each local result.
#pragma omp single
    {
        for (auto tid = 0; tid < omp_num_threads; tid++) {
            auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
            auto prev_histogram_idx = (tid) * CACHE_LINE_ENTRY;
            histogram[histogram_idx] += histogram[prev_histogram_idx];
        }
    }
    {
        auto prev_sum = histogram[tid * CACHE_LINE_ENTRY];
        for (auto it = it_beg; it < it_end; it++) {
            output[it] += prev_sum;
        }
#pragma omp barrier
    }
}

template<typename F>
void InclusivePrefixSumOMP(vector<uint32_t> &histogram, uint32_t *output, size_t size, F f,
                           int omp_num_threads) {
    static thread_local int tid = omp_get_thread_num();
    // 1st pass: per-thread total and local prefix sum.
    auto avg = size / omp_num_threads;
    auto it_beg = avg * tid;
    auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
    histogram[histogram_idx] = 0;
    auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1);
    auto prev = 0u;
    for (auto it = it_beg; it < it_end; it++) {
        auto value = f(it);
        histogram[histogram_idx] += value;
        prev += value;
        output[it] = prev;
    }
#pragma omp barrier

    // 2nd pass: single-threaded prefix sum over the per-thread totals,
    // then add the preceding threads' sum to each local result.
#pragma omp single
    {
        for (auto tid = 0; tid < omp_num_threads; tid++) {
            auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
            auto prev_histogram_idx = (tid) * CACHE_LINE_ENTRY;
            histogram[histogram_idx] += histogram[prev_histogram_idx];
        }
    }
    {
        auto prev_sum = histogram[tid * CACHE_LINE_ENTRY];
        for (auto it = it_beg; it < it_end; it++) {
            output[it] += prev_sum;
        }
#pragma omp barrier
    }
}
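A minimal usage sketch under the apparent calling convention: every thread of an already-open parallel region calls the primitive, histogram is pre-sized to at least (num_threads + 1) * CACHE_LINE_ENTRY entries, and slot 0 stays zero. The driver below, including the even-number predicate and the include path, is illustrative only and is not part of the header.

#include <cstdio>
#include "primitives.h"  // hypothetical include path for the header above

int main() {
    const size_t n = 1000;
    std::vector<uint32_t> data(n);
    for (size_t i = 0; i < n; i++) data[i] = (uint32_t)i;

    int num_threads = omp_get_max_threads();
    // One padded slot per thread, plus slot 0, which must remain zero.
    std::vector<uint32_t> histogram((num_threads + 1) * CACHE_LINE_ENTRY, 0);
    std::vector<uint32_t> output(n);

    // output[it] becomes the inclusive count of even elements in data[0..it];
    // the predicate f(it) flags the elements to be counted.
#pragma omp parallel num_threads(num_threads)
    {
        FlagPrefixSumOMP(histogram, output.data(), n,
                         [&](size_t it) { return data[it] % 2 == 0; },
                         num_threads);
    }
    printf("%u\n", output[n - 1]);  // 500 even values in 0..999
    return 0;
}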
softmax-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2017 by Contributors
 * \file softmax-inl.h
 * \brief
 */
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_

#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>

#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"

namespace mxnet {
namespace op {
namespace mxnet_op {

struct softmax_fwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float a, AType b) {
    return AType(expf(a)/b);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double a, AType b) {
    return AType(exp(a)/b);
  }
};

struct log_softmax_fwd {
  template<typename DType>
  MSHADOW_XINLINE static float Map(DType a, float b) {
    return a - logf(b);
  }

  template<typename DType>
  MSHADOW_XINLINE static double Map(DType a, double b) {
    return a - log(b);
  }
};

template<typename OP, bool negate, typename AType, typename DType,
         typename OType, typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];
  if (M == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];

  if (length == nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }

        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }

        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);

      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }

        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }

        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}

struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
    return AType(out * (ograd - sum));
  }
};

struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
    return AType(ograd - expf(out)*sum);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
    return AType(ograd - exp(out)*sum);
  }
};

template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];
  if (M == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];

  if (length != nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);

      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}

}  // namespace mxnet_op

struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;
  dmlc::optional<double> temperature;
  dmlc::optional<int> dtype;
  dmlc::optional<bool> use_length;
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }

  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
};

static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  return param.dtype.has_value() && param.dtype.value() != -1;
}

static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  return param.use_length.value();
}

static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);

  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}

static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);

  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}

static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}

static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
    }

    bool inferred_io = (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
    bool inferred_length = !softmax_use_length(attrs) ||
                           ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1);
    return inferred_io && inferred_length;
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);

    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}

static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
    } else {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
    }
  } else {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
}

static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    return softmax_use_length(attrs) ? 4 : 3;
  }
  return 2;
}

static inline std::vector<std::string>
SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      return std::vector<std::string>{"ograd", "data", "length", "output"};
    } else {
      return std::vector<std::string>{"ograd", "data", "output"};
    }
  } else {
    return std::vector<std::string>{"ograd", "output"};
  }
}

struct SoftmaxFGradient {
  const char *op_name;
  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};

template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp || inputs[0].Size() == 0U) return;
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature =
      param.temperature.has_value() ? param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }

  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        } else {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}

#if MXNET_USE_CUDA

struct SoftmaxRTCCompute {
  std::string OP;
  bool negate = false;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

struct SoftmaxRTCGradCompute {
  std::string OP1;
  std::string OP2;
  bool negate = false;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif

template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
            ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature =
      param.temperature.has_value() ? param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);

  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);

  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}

}  // namespace op
}  // namespace mxnet

namespace std {
template<>
struct hash<mxnet::op::SoftmaxParam> {
  size_t operator()(const mxnet::op::SoftmaxParam& val) {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std

#endif  // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
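Softmax() above relies on the usual max-subtraction identity: dividing the numerator and denominator of softmax by exp(mmax) (or exp(mmax/T) in the tempered form) leaves the result unchanged while keeping every exponent non-positive, so no term overflows. A standalone sketch of the same computation on a plain vector, illustrative only and independent of the mshadow types used above:

#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax with temperature T over a 1-D vector;
// mirrors the mmax-subtraction performed by the Softmax kernel above.
std::vector<double> softmax(const std::vector<double>& x, double T = 1.0) {
  double mmax = x[0];
  for (double v : x) if (v > mmax) mmax = v;   // running maximum
  double sum = 0.0;
  std::vector<double> out(x.size());
  for (size_t j = 0; j < x.size(); ++j) {
    out[j] = std::exp((x[j] - mmax) / T);      // exponent is always <= 0
    sum += out[j];
  }
  for (double& v : out) v /= sum;
  return out;
}

int main() {
  // Without the shift, exp(1002) would overflow a double; with it the
  // result is computed exactly: ~0.0900 0.2447 0.6652.
  auto p = softmax({1000.0, 1001.0, 1002.0});
  for (double v : p) printf("%f ", v);
  printf("\n");
  return 0;
}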
ordered-4.c
extern
#ifdef __cplusplus
"C"
#endif
void abort (void);

void
foo (int i, char *j)
{
  #pragma omp atomic
  j[i]++;
  #pragma omp ordered threads
  {
    int t;
    #pragma omp atomic read
    t = j[i];
    if (t != 3)
      abort ();
    if (i > 1)
      {
        #pragma omp atomic read
        t = j[i - 1];
        if (t == 2)
          abort ();
      }
    if (i < 127)
      {
        #pragma omp atomic read
        t = j[i + 1];
        if (t == 4)
          abort ();
      }
  }
  #pragma omp atomic
  j[i]++;
}

int
main ()
{
  int i;
  char j[128];
  #pragma omp parallel
  {
    #pragma omp for
    for (i = 0; i < 128; i++)
      j[i] = 0;
    #pragma omp for ordered schedule(dynamic, 1)
    for (i = 0; i < 128; i++)
      {
        #pragma omp atomic
        j[i]++;
        #pragma omp ordered threads
        {
          int t;
          #pragma omp atomic read
          t = j[i];
          if (t != 1)
            abort ();
          if (i > 1)
            {
              #pragma omp atomic read
              t = j[i - 1];
              if (t == 0)
                abort ();
            }
          if (i < 127)
            {
              #pragma omp atomic read
              t = j[i + 1];
              if (t == 2)
                abort ();
            }
        }
        #pragma omp atomic
        j[i]++;
      }
    #pragma omp for ordered schedule(static, 1)
    for (i = 0; i < 128; i++)
      foo (i, j);
  }
  return 0;
}
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. 
QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. 
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// Sections used with #pragma alloc_text. llvm::StringMap<std::tuple<StringRef, SourceLocation>> FunctionToSectionMap; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Set of no-builtin functions listed by \#pragma function. llvm::SmallSetVector<StringRef, 4> MSFunctionNoBuiltins; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. 
SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". 
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;
  unsigned SavedFunctionScopesStart;
  unsigned SavedInventedParameterInfosStart;

public:
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
    // Any saved FunctionScopes do not refer to this context.
    S.FunctionScopesStart = S.FunctionScopes.size();
    S.InventedParameterInfosStart = S.InventedParameterInfos.size();
  }

  void pop() {
    if (!SavedContext)
      return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    S.FunctionScopesStart = SavedFunctionScopesStart;
    S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
    SavedContext = nullptr;
  }

  ~ContextRAII() { pop(); }
};
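// Example (a minimal usage sketch, not code from this file; the surrounding
// function and the DeclContext *DC are hypothetical, only ContextRAII itself
// is real): temporarily entering a declaration context.
//
//   void processMember(Sema &S, DeclContext *DC) {
//     Sema::ContextRAII SavedContext(S, DC); // switches S.CurContext to DC
//     // ... act within DC; access/deprecation diagnostics are undelayed ...
//   } // destructor calls pop(), restoring the previous context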
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in \#pragma weak before
/// declared. Rare. May alias another identifier, declared or undeclared.
///
/// For aliases, the target identifier is used as a key for eventual
/// processing when the target is declared. For the single-identifier form,
/// the sole identifier is used as the key. Each entry is a `SetVector`
/// (ordered by parse order) of aliases (identified by the alias name) in case
/// of multiple aliases to the same undeclared identifier.
llvm::MapVector<
    IdentifierInfo *,
    llvm::SetVector<
        WeakInfo, llvm::SmallVector<WeakInfo, 1u>,
        llvm::SmallDenseSet<WeakInfo, 2u, WeakInfo::DenseMapInfoByAliasOnly>>>
    WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl *, 2> WeakTopLevelDecl;

IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>.
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The namespace where coroutine components are defined. In the standard,
/// they are defined in the std namespace; in the previous implementation,
/// they were defined in the std::experimental namespace.
NamespaceDecl *CoroTraitsNamespaceCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// The C++ "std::source_location::__impl" struct, defined in
/// \<source_location>.
RecordDecl *StdSourceLocationImplDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// In addition to being constant evaluated, the current expression
  /// occurs in an immediate function context - either a consteval function
  /// or a consteval if function.
  ImmediateFunctionContext,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr *, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4>
      ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  // A context can be nested in both a discarded statement context and
  // an immediate function context, so they need to be tracked independently.
  bool InDiscardedStatement;
  bool InImmediateFunctionContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
        InDiscardedStatement(false), InImmediateFunctionContext(false) {}

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated ||
           Context == ExpressionEvaluationContext::ImmediateFunctionContext;
  }

  bool isImmediateFunctionContext() const {
    return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
           (Context == ExpressionEvaluationContext::DiscardedStatement &&
            InImmediateFunctionContext);
  }

  bool isDiscardedStatementContext() const {
    return Context == ExpressionEvaluationContext::DiscardedStatement ||
           (Context == ExpressionEvaluationContext::ImmediateFunctionContext &&
            InDiscardedStatement);
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
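// Example (illustrative sketch; the helper function is hypothetical):
// querying the innermost evaluation context before deciding whether an
// expression needs code generation. ExprEvalContexts is expected to be
// non-empty, since Sema pushes an initial context at startup.
//
//   bool currentContextIsUnevaluated(Sema &S) {
//     return S.ExprEvalContexts.back().isUnevaluated();
//   }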
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

public:
  SpecialMemberOverloadResult() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl *, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation>> &Undefined);

/// Retrieves the list of suspicious delete-expressions that will be checked
/// at the end of the translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

class GlobalMethodPool {
public:
  using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
  using iterator = llvm::DenseMap<Selector, Lists>::iterator;
  iterator begin() { return Methods.begin(); }
  iterator end() { return Methods.end(); }
  iterator find(Selector Sel) { return Methods.find(Sel); }
  std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
    return Methods.insert(Val);
  }
  int count(Selector Sel) const { return Methods.count(Sel); }
  bool empty() const { return Methods.empty(); }

private:
  llvm::DenseMap<Selector, Lists> Methods;
};

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
  /// This is not a defaultable comparison operator.
  None,
  /// This is an operator== that should be implemented as a series of
  /// subobject comparisons.
  Equal,
  /// This is an operator<=> that should be implemented as a series of
  /// subobject comparisons.
  ThreeWay,
  /// This is an operator!= that should be implemented as a rewrite in terms
  /// of a == comparison.
  NotEqual,
  /// This is an <, <=, >, or >= that should be implemented as a rewrite in
  /// terms of a <=> comparison.
  Relational,
};

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
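// Example (illustrative sketch; the wrapper function is hypothetical):
// consulting the method pool for a selector, pulling externally stored
// methods in first so the lookup sees a complete pool.
//
//   bool selectorIsKnown(Sema &S, Selector Sel) {
//     S.ReadMethodPool(Sel); // merge results from the external source
//     return S.MethodPool.find(Sel) != S.MethodPool.end();
//   }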
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
  FPFeaturesStateRAII(Sema &S);
  ~FPFeaturesStateRAII();
  FPOptionsOverride getOverrides() { return OldOverrides; }

private:
  Sema &S;
  FPOptions OldFPFeaturesState;
  FPOptionsOverride OldOverrides;
  LangOptions::FPEvalMethodKind OldEvalMethod;
  SourceLocation OldFPPragmaLocation;
};

void addImplicitTypedef(StringRef Name, QualType T);

bool WarnedStackExhausted = false;

/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

private:
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
bool WarnedDarwinSDKInfoMissing = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource *getExternalSource() const { return ExternalSource; }

DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                                       StringRef Platform);
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking();

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
                                 llvm::function_ref<void()> Fn);
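// Example (illustrative sketch; the surrounding function is hypothetical):
// wrapping a deeply recursive step so the stack is grown when needed.
//
//   void instantiateDeeply(Sema &S, SourceLocation Loc) {
//     S.runWithSufficientStackSpace(Loc, [&] {
//       // ... recursive work that might otherwise overflow the stack ...
//     });
//   }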
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
  ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
  // in that case anyway.
  ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

  ~ImmediateDiagBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive())
      return;

    // Otherwise, we need to emit the diagnostic. First clear the diagnostic
    // builder itself so it won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template <typename T>
  friend const ImmediateDiagBuilder &
  operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const ImmediateDiagBuilder &operator<<(T &&V) const {
    const DiagnosticBuilder &BaseDiag = *this;
    BaseDiag << std::move(V);
    return *this;
  }
};

/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                        FunctionDecl *Fn, Sema &S);
  SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
  ~SemaDiagnosticBuilder();

  bool isImmediate() const { return ImmediateDiag.hasValue(); }

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (SemaDiagnosticBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
  operator bool() const { return isImmediate(); }

  template <typename T>
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const SemaDiagnosticBuilder &operator<<(T &&V) const {
    if (ImmediateDiag.hasValue())
      *ImmediateDiag << std::move(V);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
    return *this;
  }

  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
    if (Diag.ImmediateDiag.hasValue())
      PD.Emit(*Diag.ImmediateDiag);
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
    return Diag;
  }

  void AddFixItHint(const FixItHint &Hint) const {
    if (ImmediateDiag.hasValue())
      ImmediateDiag->AddFixItHint(Hint);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
  }

  friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
    return ExprError();
  }
  friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
    return StmtError();
  }
  operator ExprResult() const { return ExprError(); }
  operator StmtResult() const { return StmtError(); }
  operator TypeResult() const { return TypeError(); }
  operator DeclResult() const { return DeclResult(true); }
  operator MemInitResult() const { return MemInitResult(true); }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                           bool DeferHint = false);

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                           bool DeferHint = false);
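// Example (illustrative sketch; diag::err_example_mismatch is a placeholder
// diagnostic ID, and DeclName/ArgType/E are hypothetical locals): streaming
// arguments and a source range into a diagnostic.
//
//   S.Diag(Loc, diag::err_example_mismatch) << DeclName << ArgType
//                                           << E->getSourceRange();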
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;

/// RAII class to control the scope of DeferDiags.
class DeferDiagsRAII {
  Sema &S;
  bool SavedDeferDiags = false;

public:
  DeferDiagsRAII(Sema &S, bool DeferDiags)
      : S(S), SavedDeferDiags(S.DeferDiags) {
    S.DeferDiags = DeferDiags;
  }
  ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};

/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T,
                                           SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());
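// Example (illustrative sketch; the caller is hypothetical): pairing
// PushFunctionScope with PopFunctionScopeInfo around synthesized analysis.
//
//   void analyzeSynthesizedBody(Sema &S) {
//     S.PushFunctionScope();
//     // ... build and check statements in the new scope ...
//     S.PopFunctionScopeInfo(); // runs analysis-based warnings if requested
//   }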
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, SourceLocation Loc,
                          DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc,
                            DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                         SourceLocation AttrLoc);

QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template.
/// It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc, DeclarationName Entity);
QualType BuildBlockPointerType(QualType T, SourceLocation Loc,
                               DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);
QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                     SourceLocation Loc = SourceLocation());

const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char *getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};
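// Example (illustrative sketch; the subclass and diagnostic ID are
// placeholders): a custom TypeDiagnoser whose diagnose() is invoked only if
// the type turns out to be incomplete.
//
//   struct MyDiagnoser : Sema::TypeDiagnoser {
//     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
//       S.Diag(Loc, diag::err_example_incomplete) << T;
//     }
//   };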
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                       const ParsedAttr &AL, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool IsPartition = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// The global module fragment of the current translation unit.
clang::Module *GlobalModuleFragment = nullptr;

/// The modules we imported directly.
llvm::SmallPtrSet<clang::Module *, 8> DirectModuleImports;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl *, 8> DeferredExportedNamespaces;

/// Get the module unit whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

/// Helper function to judge if we are in the purview of a module.
/// Return false if we are not in a module.
bool isCurrentModulePurview() const {
  return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}

/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();

VisibleModuleSet VisibleModules;

/// Cache of module units that are usable for the current module.
llvm::DenseSet<const Module *> UsableModuleUnitsCache;

bool isUsableModule(const Module *M);

public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}

bool isModuleDirectlyImported(const Module *M) {
  return DirectModuleImports.contains(M);
}

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
  VisibleModules.setVisible(Mod, ImportLoc);
}

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}

bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);

bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
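// Example (illustrative sketch; the wrapper function is hypothetical):
// checking for a visible definition and learning which declaration would
// need to be exposed (e.g. by importing its owning module) if none is.
//
//   void requireVisibleDefinition(Sema &S, NamedDecl *ND) {
//     NamedDecl *Suggested = nullptr;
//     if (!S.hasVisibleDefinition(ND, &Suggested) && Suggested) {
//       // A definition exists but is hidden; Suggested names the
//       // declaration that should be made visible.
//     }
//   }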
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &...Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}

/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &...Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}
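// Example (illustrative sketch; the diagnostic ID is a placeholder): the
// variadic overload binds extra diagnostic arguments into a
// BoundTypeDiagnoser, so the call site stays a one-liner.
//
//   if (S.RequireCompleteType(Loc, T, diag::err_example_incomplete_arg,
//                             /*ArgIndex=*/1))
//     return true; // type was incomplete and has been diagnosed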
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);

QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                             Scope *S, CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};

class NameClassification {
  NameClassificationKind Kind;
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
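// Example (illustrative sketch of how a caller consumes a classification;
// the variable Classification is hypothetical):
//
//   switch (Classification.getKind()) {
//   case Sema::NC_Type:
//     // Classification.getType() yields the ParsedType to continue with.
//     break;
//   case Sema::NC_TypeTemplate:
//     // Classification.getTemplateName() names a template; expect '<'.
//     break;
//   default:
//     break;
//   }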
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization, bool DeclIsDefn); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
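/// For example (illustrative), passing a C union that is non-trivial to copy
/// by value to a function is diagnosed with \c NTCUC_FunctionParam as the
/// use context.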
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); enum class FnBodyKind { /// C++ [dcl.fct.def.general]p1 /// function-body: /// ctor-initializer[opt] compound-statement /// function-try-block Other, /// = default ; Default, /// = delete ; Delete }; void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr, FnBodyKind BodyKind = FnBodyKind::Other); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr, FnBodyKind BodyKind = FnBodyKind::Other); void SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. 
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' PartitionInterface, ///< 'export module X:Y;' PartitionImplementation, ///< 'module X:Y;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; private: /// The parser has begun a translation unit to be compiled as a C++20 /// Header Unit, helper for ActOnStartOfTranslationUnit() only. void HandleStartOfHeaderUnit(); public: /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module toplevel name as an access path. /// \param IsPartition If the name is for a partition. 
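/// For example (illustrative), a declaration such as
/// \code
///   import A.B;
/// \endcode
/// arrives here with \p Path holding the identifiers {A, B}.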
DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path, bool IsPartition = false); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is.
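/// For example (an illustrative mapping), a \c TypedefDecl yields
/// \c NTK_Typedef and an alias template yields \c NTK_TypeAliasTemplate.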
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
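/// A defaulted function is either a special member (e.g. a defaulted copy
/// constructor) or a defaulted comparison (e.g. a defaulted \c operator==),
/// never both at once.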
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Unlike in C++, we actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); void ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs.
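/// (For example, when a C struct or function is declared inside an
/// Objective-C \@interface.)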
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(ObjCContainerDecl *ObjCCtx); void ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); /// If \p AllowLambda is true, treat lambda as function. DeclContext *getFunctionLevelDeclContext(bool AllowLambda = false); /// Returns a pointer to the innermost enclosing function, or nullptr if the /// current context is not inside a function. If \p AllowLambda is true, /// this can return the call operator of an enclosing lambda, otherwise /// lambdas are skipped when looking for an enclosing function. FunctionDecl *getCurFunctionDecl(bool AllowLambda = false); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. 
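/// An illustrative sketch of the common redeclaration-checking pattern
/// (\c PrevDecl is an assumed previous lookup result; \c CurContext is the
/// current Sema context):
/// \code
///   if (PrevDecl && !isDeclInScope(PrevDecl, CurContext, S))
///     PrevDecl = nullptr; // not a declaration this one can redeclare
/// \endcode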
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return the new attribute if one was added, /// null otherwise.
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); HLSLNumThreadsAttr *mergeHLSLNumThreadsAttr(Decl *D, const AttributeCommonInfo &AL, int X, int Y, int Z); HLSLShaderAttr *mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL, HLSLShaderAttr::ShaderType ShaderType); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld, bool NewDeclIsDefn); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, 
AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr, bool Reversed = false); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of 
the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. 
virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO
= {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken.
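/// (For example, the address of a function with an unsatisfiable
/// \c enable_if constraint cannot be taken.)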
/// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. 
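// (Illustrative) FRS_NoViableFunction typically means no usable begin()/end()
// was found for the range, while FRS_DiagnosticIssued means an error has
// already been reported.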
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. 
These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. 
ForVisibleRedeclaration,

  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplatePack,
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC, SourceLocation TypoLoc);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
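///
/// A minimal illustrative sketch of a caller (hypothetical; the real entry
/// points that drive this helper are CorrectTypo and CorrectTypoDelayed, and
/// the local names TypoName, S, SS, and CCC below are assumed):
/// \code
///   auto Consumer = makeTypoCorrectionConsumer(
///       TypoName, LookupOrdinaryName, S, &SS, CCC,
///       /*MemberContext=*/nullptr, /*EnteringContext=*/false,
///       /*OPT=*/nullptr, /*ErrorRecovery=*/true);
///   if (!Consumer)
///     return TypoCorrection(); // typo correction was skipped entirely
/// \endcode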
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc, LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false,
                bool ForceNoCPlusPlus = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
                            bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing,
                      StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);

/// Status of the function emission, based on the CUDA/HIP/OpenMP host/device
/// attrs.
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded,     // Discarded due to CUDA/HIP hostness
  OMPDiscarded,      // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
                                         bool Final = false);

// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device
// check.
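//
// Illustrative sketch of a (hypothetical) call-site check combining the two;
// this is not the actual diagnostic logic:
//   if (getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted &&
//       !shouldIgnoreInHostDeviceCheck(Callee))
//     ; // the host/device compatibility of this call would be checked eagerly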
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
                               RecoverUncorrectedTypos, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                        ArrayRef<Expr *> Args,
                                        AssociatedNamespaceSet &AssociatedNamespaces,
                                        AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                              ArrayRef<Expr *> SubExprs,
                              QualType T = QualType());

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                            SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
    FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A,
                                  bool SkipArgCountCheck = false);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A,
                                  bool SkipArgCountCheck = false);

/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);

/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
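///
/// For example (illustrative; the QualType variables below are assumed):
/// \code
///   isValidPointerAttrType(IntPtrTy);                   // pointer: true
///   isValidPointerAttrType(RefToIntPtrTy);              // looks through the
///                                                       // reference: true
///   isValidPointerAttrType(IntRefTy, /*RefOkay=*/true); // reference itself
///                                                       // accepted
/// \endcode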
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributes &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy { MMS_loose, MMS_strict };

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl, bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, warning each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter checkTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst, bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool.
/// This allows us to efficiently associate a selector with a method
/// declaration for purposes of typechecking messages sent to "id" (where the
/// class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
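///
/// Illustrative sketch (the failure condition is hypothetical):
/// \code
///   FunctionScopeRAII FuncScope(*this); // pops the scope on early returns
///   if (BodyIsInvalid)                  // hypothetical failure condition
///     return StmtError();               // scope popped by ~FunctionScopeRAII
///   FuncScope.disable();                // success: leave the scope pushed
/// \endcode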
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributes &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, bool AllowRecovery = false); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl 
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
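///
/// For example (illustrative):
/// \code
///   void f(int *);
///   f(0);  // implicit 0 -> nullptr conversion; may be diagnosed under
///          // -Wzero-as-null-pointer-constant
/// \endcode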
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
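//
// Illustrative example of the virtual-function case: in 'b.f()' below, 'f'
// names a virtual function without a nested-name-specifier, so the call by
// itself does not establish an odr-use (a qualified call 'b.B::f()' would):
//   struct B { virtual void f(); };
//   void g(B &b) { b.f(); }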
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType, QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false,
                                      ArrayRef<const Expr *> StopAt = None);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
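///
/// Illustrative sketch (the expression and diagnostic ID are hypothetical):
/// \code
///   ExprResult E = SomeExpr;  // e.g. a reference to an overload set
///   if (tryToRecoverWithCall(E, PDiag(DiagID)))
///     return E.isInvalid() ? ExprError() : E;
/// \endcode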
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);

/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                     const PartialDiagnostic &PD);

/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                         const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
    CorrectionCandidateCallback *CCC = nullptr,
    bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                            TemplateArgumentListInfo &Buffer,
                            DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *&TemplateArgs);

bool DiagnoseDependentMemberLookup(LookupResult &R);

bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                         CorrectionCandidateCallback &CCC,
                         TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                         ArrayRef<Expr *> Args = None,
                         TypoExpr **Out = nullptr);

DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
                                  IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                              IdentifierInfo *II,
                              bool AllowBuiltinCreation=false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      const DeclarationNameInfo &NameInfo,
                                      bool isAddressOfOperand,
                                      const TemplateArgumentListInfo *TemplateArgs);

/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
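///
/// For example (illustrative): in 'sizeof(N)' below, 'N' appears in an
/// unevaluated operand, so the reference is typically marked
/// NOUR_Unevaluated instead of odr-using the variable:
/// \code
///   constexpr int N = 4;
///   unsigned long Sz = sizeof(N);
/// \endcode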
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, MultiExprArg ArgExprs, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
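//
// Illustrative example of the '.' to '->' recovery this enables
// (hypothetical types):
//   struct Handle { Widget *operator->(); };
//   void g(Handle h) { h.draw(); }  // if 'draw' is only a member of Widget,
//                                   // retry as 'h->draw()' and offer a fixit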
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                           MultiExprArg CallArgs);

enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
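//
// For example (GNU statement expression, illustrative; 'f' is hypothetical):
//   int x = ({ int t = f(); t + 1; });  // 't + 1' is the result expression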
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN(), __builtin_source_location() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, QualType ResultTy, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
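/// For example, an element-wise vector conversion (source-level sketch):
/// \code
///   typedef int int4 __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 widen(int4 v) { return __builtin_convertvector(v, float4); }
/// \endcode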
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
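///
/// For example (source-level sketch):
/// \code
///   struct S {
///     S(std::initializer_list<int>);  // initializer-list constructor
///     S(int, int);                    // not an initializer-list constructor
///   };
/// \endcode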
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD, NamedDecl *Target, UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc, const LookupResult *R = nullptr, const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation, bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
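///
/// For example (source-level sketch):
/// \code
///   struct S { ~S(); };  // gets the spec of an implicitly-declared
///                        // destructor, i.e. it is treated as noexcept
/// \endcode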
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
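///
/// For example (a sketch):
/// \code
///   std::string("x");  // non-trivial destructor: wrapped in a
///                      // CXXBindTemporaryExpr
///   42;                // returned unchanged: no record type involved
/// \endcode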
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
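///
/// The four source forms, for reference:
/// \code
///   (args op ...)          // unary right fold
///   (... op args)          // unary left fold
///   (args op ... op init)  // binary right fold
///   (init op ... op args)  // binary left fold
/// \endcode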
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;
public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return 'true' if the capture failed, 'false' if it succeeded.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
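/// For example (source-level sketch; for a bare rethrow 'expr' is null):
/// \code
///   throw std::runtime_error("boom");  // 'expr' is the thrown operand
///   throw;                             // rethrow: 'expr' is null
/// \endcode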
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
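///
/// For example, a plain 'new T' considers T's class scope before the global
/// scope, while '::new T' consults only the global scope (a sketch):
/// \code
///   struct T { static void *operator new(std::size_t); };
///   new T;    // finds T::operator new
///   ::new T;  // finds ::operator new only
/// \endcode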
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
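///
/// A sketch of the intended round trip (the locals here are hypothetical):
/// \code
///   void *Ann = Actions.SaveNestedNameSpecifierAnnotation(SS);
///   // ... stash Ann in an annotation token and consume it later ...
///   Actions.RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, SS);
/// \endcode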
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, unsigned LambdaDependencyKind, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
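///
/// For example, in the init-capture below the name 'n' is represented by
/// such a dummy variable so that the capture initializer and the lambda
/// body can look it up (a sketch; 'compute' is hypothetical):
/// \code
///   auto L = [n = compute()] { return n; };
/// \endcode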
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache;
/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
bool addInstantiatedParametersToScope(FunctionDecl *Function, const FunctionDecl *PatternDecl, LocalInstantiationScope &Scope, const MultiLevelTemplateArgumentList &TemplateArgs);
public:
const NormalizedConstraint *getNormalizedAssociatedConstraints(NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. 
/// The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl
*ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, const ParsedAttributesView &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
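/// For illustration, the kind of code this check rejects:
/// \code
/// struct A { virtual void f() final; };
/// struct B : A { void f() override; };  // error: 'f' overrides a 'final' function
/// \endcode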
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
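/// For illustration (hypothetical names), recovery from a mistyped template
/// name might look like:
/// \code
/// template<typename T> struct String {};
/// Strng<int> s;  // typo-corrected to the type template 'String'
/// \endcode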
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
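/// For illustration, such a reference to a variable template specialization:
/// \code
/// template<typename T> constexpr T pi = T(3.1415926535897932385L);
/// constexpr float pf = pi<float>;  // names the 'float' specialization
/// \endcode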
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
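/// For illustration, an argument deduced from an array bound (the
/// CTAK_DeducedFromArrayBound case below):
/// \code
/// template<typename T, unsigned N> void f(T (&)[N]);
/// int arr[4];
/// f(arr);  // N is deduced as 4 from the array bound
/// \endcode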
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requires-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requires-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise.
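/// For illustration, an unexpanded pack inside a nested-name-specifier:
/// \code
/// template<typename ...Ts> struct X {
///   typename Ts::type t;  // error: unexpanded parameter pack 'Ts'
/// };
/// \endcode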
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
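/// For illustration, in the sketch below the expansion 'Ts*...' produces
/// exactly as many arguments as there are elements bound to 'Ts':
/// \code
/// template<typename ...Ts> struct Tuple {};
/// template<typename ...Ts> using PtrTuple = Tuple<Ts*...>;
/// \endcode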
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
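/// For illustration:
/// \code
/// template<typename T> void f(T, T);
/// f(1, 'a');  // T deduced as both 'int' and 'char'
/// \endcode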
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
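/// For illustration, class template argument deduction through an implicit
/// guide:
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2);  // implicit deduction guide deduces Pair<int>
/// \endcode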
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, const AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false, bool AllowOrderingByConstraints = true); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs( const NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. 
The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// We are building an implied call from __builtin_dump_struct. The /// arguments are in CallArgs. BuildingBuiltinDumpStructCall, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized.
Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; union { /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// The list of argument expressions in a synthesized call. const Expr *const *CallArgs; }; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The number of expressions in CallArgs. unsigned NumCallArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth.
/// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and the object evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id.
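///
/// Example (illustrative sketch; `SemaRef`, `Loc`, `Param`, `TD`, and
/// `SugaredArgs` are hypothetical locals, not part of this interface):
/// \code
///   InstantiatingTemplate Inst(SemaRef, Loc, Param, TD, SugaredArgs);
///   if (Inst.isInvalid())
///     return true; // too deep: the constructor has already diagnosed this
///   // ... substitute into the default argument while the context is live
/// \endcode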
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. 
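///
/// Example (illustrative sketch; `SemaRef`, `Loc`, `ND`, and `Info` are
/// hypothetical locals, not part of this interface):
/// \code
///   InstantiatingTemplate Inst(SemaRef, Loc,
///                              InstantiatingTemplate::ConstraintSubstitution{},
///                              ND, Info, SourceRange(Loc));
///   if (!Inst.isInvalid()) {
///     // ... substitute into the constraint expression ...
///   }
/// \endcode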
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum number of /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed.
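///
/// Example (illustrative sketch):
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // In a SFINAE context; *Info may still be null when no enclosing
///     // template-deduction context is available to capture diagnostics.
///   }
/// \endcode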
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation); /// however, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
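///
/// Example (illustrative sketch; `OldProto`, `EPI`, and `NumParams` are
/// hypothetical locals, not part of this interface):
/// \code
///   ExtParameterInfoBuilder ExtParamInfos;
///   for (unsigned I = 0; I != NumParams; ++I)
///     ExtParamInfos.set(I, OldProto->getExtParameterInfo(I));
///   // Null when nothing interesting was recorded for any parameter.
///   EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(NumParams);
/// \endcode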
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); ObjCInterfaceDecl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); ObjCProtocolDecl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); ObjCCategoryDecl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); ObjCImplementationDecl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); ObjCCategoryImplDecl *ActOnStartCategoryImplementation( SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
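///
/// Example (illustrative sketch; the protocol-related locals are
/// hypothetical, and the empty braces stand for an empty type-argument
/// list):
/// \code
///   QualType T = BuildObjCObjectType(Context.ObjCBuiltinIdTy, Loc,
///                                    SourceLocation(), {}, SourceLocation(),
///                                    ProtocolLAngleLoc, Protocols,
///                                    ProtocolLocs, ProtocolRAngleLoc);
/// \endcode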
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
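///
/// Example (illustrative sketch; the locals are hypothetical):
/// \code
///   ParsedType ReceiverType;
///   switch (getObjCMessageKind(S, Name, NameLoc, IsSuper, HasTrailingDot,
///                              ReceiverType)) {
///   case ObjCSuperMessage:    /* message to 'super' */        break;
///   case ObjCInstanceMessage: /* receiver is an expression */ break;
///   case ObjCClassMessage:    /* identifier names a type */   break;
///   }
/// \endcode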
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
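///
/// Example (illustrative sketch; `Method` and `CurrentClass` are
/// hypothetical locals):
/// \code
///   ResultTypeCompatibilityKind RTC =
///       checkRelatedResultTypeCompatibility(Method, CurrentClass);
///   CheckObjCMethodOverrides(Method, CurrentClass, RTC);
/// \endcode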
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on well-formed \#pragma alloc_text(). void ActOnPragmaMSAllocText( SourceLocation PragmaLocation, StringRef Section, const SmallVector<std::tuple<IdentifierInfo *, SourceLocation>> &Functions); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } void ActOnPragmaFPEvalMethod(SourceLocation Loc, LangOptions::FPEvalMethodKind Value); /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Call on well formed \#pragma function. void ActOnPragmaMSFunction(SourceLocation Loc, const llvm::SmallVectorImpl<StringRef> &NoBuiltins); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Only called on function definitions; if there is a `#pragma alloc_text` /// that decides which code section the function should be in, add /// attribute section to the function. void AddSectionMSAllocText(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based no_builtin, consider marking the function /// with attribute no_builtin. void AddImplicitMSFunctionNoBuiltinAttr(FunctionDecl *FD); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. 
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc); ExprResult BuildOperatorCoawaitCall(SourceLocation Loc, Expr *E, UnresolvedLookupExpr *Lookup); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *Operand, Expr *Awaiter, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *Operand, UnresolvedLookupExpr *Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Look up 'coroutine_traits' in the std namespace and the /// std::experimental namespace. The namespace found is recorded in /// Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() is not /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Stack of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. 
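///
/// Example (illustrative sketch; `Loc` and `TI` are hypothetical locals):
/// \code
///   ActOnOpenMPBeginDeclareVariant(Loc, TI);
///   assert(isInOpenMPDeclareVariantScope());
///   // ... act on the declarations guarded by the context selector ...
///   ActOnOpenMPEndDeclareVariant();
/// \endcode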
bool isInOpenMPDeclareVariantScope() const {
  return !OMPDeclareVariantScopes.empty();
}

/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
                           SourceLocation LParenLoc, MultiExprArg ArgExprs,
                           SourceLocation RParenLoc, Expr *ExecConfig);

/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);

/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();

/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
    const FunctionDecl *OldFD, const FunctionDecl *NewFD,
    const PartialDiagnostic &NoProtoDiagID,
    const PartialDiagnosticAt &NoteCausedDiagIDAt,
    const PartialDiagnosticAt &NoSupportDiagIDAt,
    const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
    bool ConstexprSupported, bool CLinkageMayDiffer);

/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                           unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                              unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                 ExprObjectKind OK, SourceLocation Loc);

/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();

/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();

/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
                                     unsigned CapLevel) const;

/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
                                unsigned CaptureLevel) const;

/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
                                unsigned CaptureLevel) const;

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// Called on start of new data sharing attribute block.
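/// For illustration only, a hedged sketch of the begin/end pairing these
/// hooks are meant to follow (`Actions`, `Kind`, `DirName`, `CurScope`,
/// `Loc`, and `Directive` are assumed caller-side names, not part of this
/// header):
/// \code
///   Actions.StartOpenMPDSABlock(Kind, DirName, CurScope, Loc);
///   // ... parse clauses and the associated statement ...
///   Actions.EndOpenMPDSABlock(Directive);
/// \endcode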
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. 
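/// A hedged sketch of the overall 'declare reduction' callback sequence
/// implied by the hooks above (all variable names are illustrative, not
/// from this header):
/// \code
///   DeclGroupPtrTy DRD = Actions.ActOnOpenMPDeclareReductionDirectiveStart(
///       S, DC, Name, ReductionTypes, AS);
///   Actions.ActOnOpenMPDeclareReductionCombinerStart(S, D);
///   Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerExpr);
///   VarDecl *OmpPriv =
///       Actions.ActOnOpenMPDeclareReductionInitializerStart(S, D);
///   Actions.ActOnOpenMPDeclareReductionInitializerEnd(D, InitExpr, OmpPriv);
///   Actions.ActOnOpenMPDeclareReductionDirectiveEnd(S, DRD, IsValid);
/// \endcode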
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if currently in OpenMP task with untied clause context. bool isInOpenMPTaskUntiedContext() const; /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. 
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
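/// As a representative example of the ActOnOpenMP*Directive hooks below, a
/// hypothetical parser-side call for 'omp critical' (argument names are
/// illustrative only):
/// \code
///   StmtResult Res = Actions.ActOnOpenMPCriticalDirective(
///       DirName, Clauses, AssociatedStmt, StartLoc, EndLoc);
/// \endcode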
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp teams loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams loop' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPParallelGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel loop' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetParallelGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \param NumAppendArgs The number of omp_interop_t arguments to account for /// in checking. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. /// \param AdjustArgsNothing The list of 'nothing' arguments. /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. /// \param AppendArgs The list of 'append_args' arguments. /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. /// \param AppendArgsLoc The Location of an 'append_args' clause. /// \param SR The SourceRange of the 'declare variant' directive. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef<Expr *> AdjustArgsNothing, ArrayRef<Expr *> AdjustArgsNeedDevicePtr, ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs, SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. 
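/// A hypothetical usage sketch for the clause callbacks that follow (the
/// clause expression and source locations are assumed to come from the
/// parser; names are illustrative):
/// \code
///   OMPClause *C = Actions.ActOnOpenMPFinalClause(CondExpr, StartLoc,
///                                                 LParenLoc, EndLoc);
/// \endcode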
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                  SourceLocation KindLoc,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. 
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);

OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc,
    ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
    ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
    ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
    SourceLocation LPKindLoc, SourceLocation ColonLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. 
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'has_device_addr' clause. OMPClause *ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. 
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};

static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_PRValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                 = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
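/// A hedged caller-side sketch (assumed names, not from this header):
/// \code
///   if (!S.checkAndRewriteMustTailAttr(RetStmt, MustTailAttr))
///     return StmtError(); // a diagnostic has already been emitted
/// \endcode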
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);

private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);

public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
  /// An arithmetic operation.
  ACK_Arithmetic,
  /// A bitwise operation.
  ACK_BitwiseOp,
  /// A comparison.
  ACK_Comparison,
  /// A conditional (?:) operator.
  ACK_Conditional,
  /// A compound assignment expression.
  ACK_CompAssign,
};

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, ArithConvKind ACK);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatibleFunctionPointer - The assignment is between two function
  /// pointer types that are not compatible, but we accept them as an
  /// extension.
  IncompatibleFunctionPointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's
  /// by far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ, e.g. char ** -> const char **, but we accept them as
  /// an extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For
  /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
  /// XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright; it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS, CastKind &Kind,
                                             bool ConvertRHS = true);

/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability. If a diagnostic is produced, \p RHS will be
///        set to ExprError(). Note that this function may still return
///        without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
///        conversions necessary to perform the assignment. If \c false,
///        \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false, bool ConvertRHS = true);

// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                         = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

ExprResult PerformQualificationConversion(
    Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
    CheckedConversionKind CCK = CCK_ImplicitConversion);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).

/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                      ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
    bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType CheckSizelessVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion, bool AllowBoolOperation, bool ReportInvalid); QualType GetSignedVectorType(QualType V); QualType GetSignedSizelessVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckSizelessVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); // type checking for sizeless vector binary operators. QualType CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, ArithConvKind OperationKind); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). 
  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  // Fake up a scoped enumeration that still contextually converts to bool.
  struct ReferenceConversionsScope {
    /// The conversions that would be performed on an lvalue of type T2 when
    /// binding a reference of type T1 to it, as determined when evaluating
    /// whether T1 is reference-compatible with T2.
    enum ReferenceConversions {
      Qualification = 0x1,
      NestedQualification = 0x2,
      Function = 0x4,
      DerivedToBase = 0x8,
      ObjC = 0x10,
      ObjCLifetime = 0x20,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
    };
  };
  using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               ReferenceConversions *Conv = nullptr);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckMatrixCast - Check type constraints for matrix casts.
  // We allow casting between matrices of the same dimensions, i.e. when they
  // have the same number of rows and columns. Returns true if the cast is
  // invalid.
  bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                       CastKind &Kind);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size, or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
  };

  QualType PreferredConditionType(ConditionKind K) const {
    return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy;
  }

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                                 ConditionKind CK, bool MissingOK = false);

  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc, ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return the converted expression, or ExprError() if there were any
  /// errors
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an
  /// expression found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns ExprError() if conversion to bool is
  /// invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr,
                                      bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);

  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual SemaDiagnosticBuilder
    diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
    virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                                 SourceLocation Loc) = 0;
    virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
    virtual ~VerifyICEDiagnoser() {}
  };

  enum AllowFoldKind {
    NoFold,
    AllowFold,
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns ExprError() on failure.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             AllowFoldKind CanFold = NoFold) {
    return VerifyIntegerConstantExpression(E, nullptr, CanFold);
  }

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns ExprError() on failure.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc,
                            IdentifierInfo *FieldName, QualType FieldTy,
                            bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting
  /// the same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to
  /// this map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics
  ///   unless \p EmitOnBothSides is true.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla)
  ///        << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
                                             unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc,
                                           unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
                                               unsigned DiagID,
                                               FunctionDecl *FD);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
                                             unsigned DiagID,
                                             FunctionDecl *FD);

  SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
                                   FunctionDecl *FD = nullptr);
  SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
                                   const PartialDiagnostic &PD,
                                   FunctionDecl *FD = nullptr) {
    return targetDiag(Loc, PD.getDiagID(), FD) << PD;
  }

  /// Check if the type is allowed to be used for the current target.
  void checkTypeSupport(QualType Ty, SourceLocation Loc,
                        ValueDecl *D = nullptr);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  enum CUDAVariableTarget {
    CVT_Device,  ///< Emitted on device side with a shadow variable on host side
    CVT_Host,    ///< Emitted on host side only
    CVT_Both,    ///< Emitted on both sides with different addresses
    CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
  };
  /// Determines whether the given variable is emitted on host or device side.
  CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never
  /// codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

  /// May add implicit CUDAConstantAttr attribute to VD, depending on VD
  /// and current compilation settings.
  void MaybeAddCUDAConstantAttr(VarDecl *VD);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic
  ///   to be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating
  ///   duplicate deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas are by default host device functions unless they have
  /// explicit host or device attributes.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object
  ///        on its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached
  /// to the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
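  // Illustrative sketch of how a parser might drive one of the
  // code-completion entry points above; `Actions` is assumed here to be the
  // Sema instance and `getCurScope()` the parser's current scope, as in
  // Clang's Parser:
  //
  //   if (Tok.is(tok::code_completion)) {
  //     // Complete an ordinary name in expression context; PCC_Expression
  //     // is one of the ParserCompletionContext values declared above.
  //     Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
  //     return ExprError(); // parsing is cut off once completion results exist
  //   }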
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, BinaryOperatorKind Opcode); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const SourceLocation CallExprLoc, const NamedDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Nullable_result = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for
  /// #include directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;
  bool isCFError(RecordDecl *D);

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never*
  /// from template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  ObjCContainerDecl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// Determine the number of levels of enclosing template parameters. This
  /// is only usable while parsing. Note that this does not include dependent
  /// contexts in which no template parameters have yet been declared, such
  /// as in a terse function template or generic lambda before the first
  /// 'auto' is encountered.
  unsigned getTemplateDepth(Scope *S) const;

  /// To be used for checking whether the arguments being passed to a
  /// function exceed the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
  SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  int ParsingClassDepth = 0;

  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;

    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with
  /// lower or equal alignment requirements. If so, it removes it. This is
  /// used when we do not want to diagnose such misaligned access (e.g. in
  /// conversions to void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored,
  /// used for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurLexicalContext is a kernel function or it is known that the
  ///   function will be emitted for the device, emits the diagnostics
  ///   immediately.
  /// - If CurLexicalContext is a function and we are compiling
  ///   for the device, but we don't know that this function will be
  ///   codegen'ed for device yet, creates a diagnostic which is emitted if
  ///   and when we realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  Diagnose __float128 type usage only from SYCL device code if the
  ///  current target doesn't support it
  ///  if (!S.Context.getTargetInfo().hasFloat128Type() &&
  ///      S.getLangOpts().SYCLIsDevice)
  ///    SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
  SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
                                             unsigned DiagID);

  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed, creates a deferred diagnostic to be emitted if
  ///   and when the caller is codegen'ed, and returns true.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  ///
  /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
  /// codegen'ed yet.
  bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);

  void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
                                  llvm::DenseSet<QualType> Visited,
                                  ValueDecl *DeclToCheck);
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
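// Illustrative sketch of the EnterExpressionEvaluationContext RAII helper
// declared above, assuming `S` is a Sema instance; the pushed context is
// popped automatically when the object goes out of scope:
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze an unevaluated operand (e.g. the operand of sizeof) ...
//   }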
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n 4 ./fox_floats_timer_caching_omp_fileIO_benchmark */ /* Header files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not to print matrices in the command line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define the number of threads used to speed up the computing #define NUM_THREADS 2 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that stores the local matrix (2^21) typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // definition with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // definition with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print
function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ int main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and write matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and write matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD mode starts from here (processes fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up the process grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("The order of the matrices read from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A on process 0 from A.dat, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A on process 0 by using stdout, gathering them from each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as its local transpose on process 0 from B.dat, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as its local transpose on process 0 by using stdout, gathering it from each process (Procedure) Build_matrix_type(local_A); // Build local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n
$\times$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // Fox parallel matrix multiplication algorithm implementation timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Write matrix local_C (parallel matrix multiplication result) into C.dat if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processes Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for the transposed local storage of B if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processes, special for the transposed local storage of B Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Write matrix C split in processes into a file if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processes Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resources are reclaimed return 0; } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension.
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for the circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // The process holding this stage's block of A broadcasts it in the process grid's row communicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for the other processes to store the broadcast block of A MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of local_B along the grid column, after the local multiplication } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process.
*/ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 reads the matrix from the file and sends blocks to the other processes fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processes receive the matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix_A */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 reads the matrix from the file and sends blocks to the other processes fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and columns in local_B, for column-major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and columns in local_B, for column-major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processes receive the matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and columns in local_B, for column-major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and columns in local_B, for column-major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and columns in local_B, for column-major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI,
0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Receive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Receive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and columns in local_B, for column-major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and columns in local_B, for column-major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Receive and Print Matrix C: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int
mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Receive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and write them to the file */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set the local matrix's elements to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const
void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ /* Note: MPI_Address and MPI_Type_struct were deprecated in MPI-2 and removed in MPI-3; with newer MPI libraries use MPI_Get_address and MPI_Type_create_struct instead. */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * with OpenMP thread acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and columns in local_B, for column-major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Receive and Print Local Matrix A: * Process 0 prints local matrix local_A * Other processes send local matrix local_A to process 0 * And process 0 receives local matrix local_A from the other processes */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1,
local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Receive and Print Local Matrix for local matrix B's transpose: * Process 0 prints local matrix local_B * Other processes send local matrix local_B to process 0 * And process 0 receives local matrix local_B from the other processes */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and columns in local_B, for column-major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and columns in local_B, for column-major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Receive and Print Local Matrix C: * Process 0 prints local matrix local_C * Other processes send local matrix local_C to process 0 * And process 0 receives local matrix local_C from the other processes */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Receive and Write Local Matrix A: * Process 0 writes local matrix local_A * Other processes send local matrix local_A to process 0 * And process 0 receives local matrix local_A from the other processes */ void Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp,
"\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
example4.c
// calculation example of far-field intensity distributions. // radar-chart data are output for the distant scattering field. #include "emf_mie_ms.h" int main(int argc,char *argv[]) { MSPD msp; FILE *fp1,*fp2; double complex e[3],h[3]; double th,ph,phd,dthd,dthr,dphr,dphd,ra,r[3],*ie,*ih,mf,iemax,ihmax; int i,j,sn; if(argc!=2 && argc!=4){ printf("Usage : %s datafile_name [sampling_number multiplier_factor](optional)\n",argv[0]); printf("default sampling number 360, multiplier factor 2000 (radius = 2000*lambda0)\n"); exit(0); } else if(argc==4){ sn=atoi(argv[2]); mf=atof(argv[3]); } else{ sn=360; mf=2000.0; } read_dat_ms(argv[1],&msp); // read data file print_data_ms(&msp); // print data ra=mf*msp.bm.lambda_0; // radius for calculation point dthd=360.0/(double)sn; // delta theta [degree] dthr=2.0*M_PI/(double)sn; // delta theta [radian] dphd=180.0/(double)sn; dphr=1.0*M_PI/(double)sn; ie=(double *)m_alloc2(sn+1,sizeof(double),"example4.c,ie"); ih=(double *)m_alloc2(sn+1,sizeof(double),"example4.c,ih"); // x=0 plane, th=0 : +z-axis, th=270 : +y-axis if((fp1=fopen("fsIe_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## x=0 plane, theta=0 : +z-axis, theta=270 : +y-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## x=0 plane, theta=0 : +z-axis, theta=270 : +y-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=0.0; r[1]=-ra*sin(th); r[2]= ra*cos(th); scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // y=0 plane, th=0 : +z-axis, th=90 : +x-axis if((fp1=fopen("fsIe_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## y=0 plane, theta=0 : +z-axis, theta=90 : +x-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## y=0 plane, theta=0 : +z-axis, theta=90 : +x-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=ra*sin(th); r[1]=0.0; r[2]=ra*cos(th); scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // z=0 plane, th=0 : +x-axis, th=90 :
+y-axis if((fp1=fopen("fsIe_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## z=0 plane, theta=0 : +x-axis, theta=90 : +y-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## z=0 plane, theta=0 : +x-axis, theta=90 : +y-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=ra*cos(th); r[1]=ra*sin(th); r[2]=0.0; scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // 3d plot if((fp1=fopen("fsIe_3d.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## 3d plot, x=r*sin(theta)*cos(phi), y=r*sin(theta)*sin(phi), z=r*cos(theta), r=multiplier_factor*lambda0"); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta phi electric_field_intensity"); if((fp2=fopen("fsIh_3d.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## 3d plot, x=r*sin(theta)*cos(phi), y=r*sin(theta)*sin(phi), z=r*cos(theta), r=multiplier_factor*lambda0"); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta phi magnetic_field_intensity"); for(i=0;i<sn;i++){ ph =0.5*dphr+(double)i*dphr; phd=0.5*dphd+(double)i*dphd; #pragma omp parallel for schedule(dynamic) private(th,r,e,h) for(j=0;j<=sn;j++){ th=(double)j*dthr; r[0]=ra*sin(ph)*cos(th); r[1]=ra*sin(ph)*sin(th); r[2]=ra*cos(ph); scattered_EH_ms(e,h,r,&msp); // scattered field ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<=sn;j++){ th=(double)j*dthd; fprintf(fp1,"%g %g %15.14e\n",phd,th,ie[j]); fprintf(fp2,"%g %g %15.14e\n",phd,th,ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); free(ie); free(ih); free_ms(&msp); return 0; }
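/* All three radar-chart loops above share one pattern: sample the circle at
 * bin centers th = (i + 0.5)*dth, map the angle to a point on the chosen
 * coordinate plane, accumulate |E|^2 and |H|^2 from the scattered field,
 * and emit both raw and max-normalized intensities.  The stand-alone sketch
 * below repeats that pattern with a toy sin^2(theta) intensity standing in
 * for scattered_EH_ms(); toy_intensity() is an illustrative assumption, not
 * part of emf_mie_ms. */
#include <stdio.h>
#include <math.h>

#define SN 360                     /* sampling number, as in the default */

/* toy pattern ~ sin^2(theta): stands in for |E|^2 from scattered_EH_ms() */
static double toy_intensity(const double r[3], double ra)
{
    double ct = r[2] / ra;         /* cos(theta) on the sampling circle */
    return 1.0 - ct * ct;
}

int main(void)
{
    double ie[SN], iemax = 0.0;
    double ra = 2000.0;            /* radius, in units of lambda0 */
    double dthr = 2.0 * M_PI / (double)SN, dthd = 360.0 / (double)SN;
    int i;

    /* x=0 plane: th=0 -> +z-axis, th=270 -> +y-axis, bin-center sampling */
    for (i = 0; i < SN; i++) {
        double th = 0.5 * dthr + (double)i * dthr;
        double r[3];
        r[0] = 0.0; r[1] = -ra * sin(th); r[2] = ra * cos(th);
        ie[i] = toy_intensity(r, ra);
        if (ie[i] > iemax) iemax = ie[i];
    }
    /* emit "angle raw normalized", the same three columns as fsIe_yz.txt */
    for (i = 0; i < SN; i++) {
        double thd = 0.5 * dthd + (double)i * dthd;
        printf("%g %15.14e %15.14e\n", thd, ie[i], ie[i] / iemax);
    }
    return 0;
}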
displacement_lagrangemultiplier_mixed_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierMixedContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * Lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierMixedContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and its subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; /// The epsilon tolerance definition static constexpr double Tolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for Lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for Lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierMixedContactCriteria(
const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType RotRatioTolerance, const TDataType RotAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation solution mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; // The normal contact solution mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierMixedContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } //* Copy constructor. DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierMixedContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),rot_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliary values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Auxiliary displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, residual_dof_value, dof_value, dof_incr) reduction(+:disp_residual_solution_norm, rot_residual_solution_norm, lm_solution_norm, lm_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; lm_solution_norm += std::pow(dof_value, 2); lm_increase_norm += std::pow(dof_incr, 2); ++lm_dof_num; } else if ((*p_check_disp)(r_curr_var)) { residual_dof_value = rb[dof_id]; disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume it is a rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; residual_dof_value = rb[dof_id]; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } } if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; const TDataType lm_ratio = lm_solution_norm > Tolerance ?
std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0; const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num); TDataType residual_disp_ratio, residual_rot_ratio; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm < Tolerance) ? 1.0 : disp_residual_solution_norm; residual_disp_ratio = 1.0; if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm < Tolerance) ? 1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the ratio of the rotations residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We calculate the absolute norms TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << 
r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio; r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if ( disp_converged && rot_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initializes the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem.
(unused) */ void Initialize( ModelPart& rModelPart) override { // Initialize BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); // Initialize header ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPCs exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This method provides the default parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrange_multiplier_mixed_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_lagrange_multiplier_mixed_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the
member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement solution mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation solution mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierMixedContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags 
DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
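Editor's note: the criteria above combine, per field, a relative check against the first-iteration residual with an absolute check, and convergence requires every considered block to pass. A minimal self-contained C++ sketch of that decision logic follows; the names (FieldTolerance, IsFieldConverged, AllBlocksConverged) are illustrative only, not part of the Kratos API.

// Per-field thresholds mirroring the m*RatioTolerance / m*AbsTolerance members above.
struct FieldTolerance {
    double ratio_tolerance; // bound on ||r|| / ||r0||
    double abs_tolerance;   // bound on ||r|| itself
};

// A field converges when EITHER the relative ratio OR the absolute norm is
// small enough; ||r0|| is the reference norm captured on the first nonlinear
// iteration (cf. the INITIAL_RESIDUAL_IS_SET flag above).
inline bool IsFieldConverged(double current_norm, double initial_norm,
                             const FieldTolerance& tol)
{
    const double ratio = initial_norm > 0.0 ? current_norm / initial_norm : 0.0;
    return ratio <= tol.ratio_tolerance || current_norm <= tol.abs_tolerance;
}

// Global convergence: displacement and Lagrange-multiplier blocks always,
// the rotation block only when rotation DoFs are present.
inline bool AllBlocksConverged(bool disp, bool lm, bool rot_considered, bool rot)
{
    return disp && lm && (rot_considered ? rot : true);
}

The ENSURE_CONTACT branch in the class adds one wrinkle this sketch omits: when contact is not enforced and the Lagrange-multiplier solution norm is essentially zero, the LM block is treated as converged outright.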
ep.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - EP

  This benchmark is an OpenMP C version of the NPB EP code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted. This software is
  provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:
           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:
           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/

/*--------------------------------------------------------------------

  Author: P. O. Frederickson
          D. H. Bailey
          A. C. Woo

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"
#include "npbparams.h"

/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE

/* global variables */
/* common /storage/ */
#include <omp.h>
static double x[131072];
static double q[10];

/*--------------------------------------------------------------------
      program EMBAR
c-------------------------------------------------------------------*/
/*
c   This is the serial version of the APP Benchmark 1,
c   the "embarrassingly parallel" benchmark.
c
c   M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c   numbers. MK is the Log_2 of the size of each batch of uniform random
c   numbers. MK can be set for convenience on a given system, since it does
c   not affect the results.
*/

int main(int argc,char **argv)
{
  double Mops;
  double t1;
  double t2;
  double t3;
  double t4;
  double x1;
  double x2;
  double sx;
  double sy;
  double tm;
  double an;
  double tt;
  double gc;
  double dum[3] = {(1.0), (1.0), (1.0)};
  int np;
  int ierr;
  int node;
  int no_nodes;
  int i;
  int ik;
  int kk;
  int l;
  int k;
  int nit;
  int ierrcode;
  int no_large_nodes;
  int np_add;
  int k_offset;
  int j;
  int nthreads = 1;
  boolean verified;
  /* character*13 */
  char size[14];
/*
c   Because the size of the problem is too large to store in a 32-bit
c   integer for some classes, we put it into a string (for printing).
c   Have to strip off the decimal point put in there by the floating
c   point print statement (internal file)
*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - EP Benchmark\n");
  sprintf(size,"%12.0f",(pow(2.0,(28 + 1))));

#pragma omp parallel for private (j)
  for (j = 13; j >= 1; j += -1) {
    if (size[j] == '.')
      size[j] = ' ';
  }
  printf(" Number of random numbers generated: %13s\n",size);
  verified = 0;
/*
c   Compute the number of "batches" of random number pairs generated
c   per processor. Adjust if the number of processors does not evenly
c   divide the total number
*/
  np = 1 << (28 - 16);
/*
c   Call the random number generator functions and initialize
c   the x-array to reduce the effects of paging on the timings.
c   Also, call all mathematical functions that are used. Make
c   sure these initializations cannot be eliminated as dead code.
*/ vranlc(0,&dum[0],dum[1],&dum[2]); dum[0] = randlc(&dum[1],dum[2]); #pragma omp parallel for private (i) for (i = 0; i <= 131071; i += 1) { x[i] = - 1.0e99; } Mops = log((sqrt((fabs((1.0 > 1.0?1.0 : 1.0)))))); timer_clear(1); timer_clear(2); timer_clear(3); timer_start(1); vranlc(0,&t1,1220703125.0,x); /* Compute AN = A ^ (2 * NK) (mod 2^46). */ t1 = 1220703125.0; for (i = 1; i <= 17; i += 1) { t2 = randlc(&t1,t1); } an = t1; tt = 271828183.0; gc = 0.0; sx = 0.0; sy = 0.0; #pragma omp parallel for private (i) for (i = 0; i <= 9; i += 1) { q[i] = 0.0; } /* c Each instance of this loop may be performed independently. We compute c the k offsets separately to take into account the fact that some nodes c have more numbers to generate than others */ k_offset = - 1; { double t1; double t2; double t3; double t4; double x1; double x2; int kk; int i; int ik; int l; /* private copy of q[0:NQ-1] */ double qq[10]; #pragma omp parallel for private (i) for (i = 0; i <= 9; i += 1) { qq[i] = 0.0; } //#pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) for (k = 1; k <= np; k += 1) { kk = k_offset + k; t1 = 271828183.0; t2 = an; /* Find starting seed t1 for this kk. */ for (i = 1; i <= 100; i += 1) { ik = kk / 2; if (2 * ik != kk) t3 = randlc(&t1,t2); if (ik == 0) break; t3 = randlc(&t2,t2); kk = ik; } /* Compute uniform pseudorandom numbers. */ if (0 == 1) timer_start(3); vranlc(2 * (1 << 16),&t1,1220703125.0,x - 1); if (0 == 1) timer_stop(3); /* c Compute Gaussian deviates by acceptance-rejection method and c tally counts in concentric square annuli. This loop is not c vectorizable. */ if (0 == 1) timer_start(2); for (i = 0; i <= 65535; i += 1) { x1 = 2.0 * x[2 * i] - 1.0; x2 = 2.0 * x[2 * i + 1] - 1.0; t1 = x1 * x1 + x2 * x2; if (t1 <= 1.0) { t2 = sqrt(- 2.0 * log(t1) / t1); /* Xi */ t3 = x1 * t2; /* Yi */ t4 = x2 * t2; l = ((fabs(t3) > fabs(t4)?fabs(t3) : fabs(t4))); /* counts */ qq[l] += 1.0; /* sum of Xi */ sx = sx + t3; /* sum of Yi */ sy = sy + t4; } } if (0 == 1) timer_stop(2); } { #pragma omp parallel for private (i) for (i = 0; i <= 9; i += 1) { q[i] += qq[i]; } } //#if defined(_OPENMP) // nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ /* end of parallel region */ } #pragma omp parallel for private (i) reduction (+:gc) for (i = 0; i <= 9; i += 1) { gc = gc + q[i]; } timer_stop(1); tm = timer_read(1); nit = 0; if (28 == 24) { if (fabs((sx - - 3.247834652034740e3) / sx) <= 1.0e-8 && fabs((sy - - 6.958407078382297e3) / sy) <= 1.0e-8) { verified = 1; } } else if (28 == 25) { if (fabs((sx - - 2.863319731645753e3) / sx) <= 1.0e-8 && fabs((sy - - 6.320053679109499e3) / sy) <= 1.0e-8) { verified = 1; } } else if (28 == 28) { if (fabs((sx - - 4.295875165629892e3) / sx) <= 1.0e-8 && fabs((sy - - 1.580732573678431e4) / sy) <= 1.0e-8) { verified = 1; } } else if (28 == 30) { if (fabs((sx - 4.033815542441498e4) / sx) <= 1.0e-8 && fabs((sy - - 2.660669192809235e4) / sy) <= 1.0e-8) { verified = 1; } } else if (28 == 32) { if (fabs((sx - 4.764367927995374e4) / sx) <= 1.0e-8 && fabs((sy - - 8.084072988043731e4) / sy) <= 1.0e-8) { verified = 1; } } Mops = pow(2.0,(28 + 1)) / tm / 1000000.0; printf("EP Benchmark Results: \nCPU Time = %10.4f\nN = 2^%5d\nNo. 
Gaussian Pairs = %15.0f\nSums = %25.15e %25.15e\nCounts:\n",tm,28,gc,sx,sy); for (i = 0; i <= 9; i += 1) { printf("%3d %15.0f\n",i,q[i]); } c_print_results("EP",'A',28 + 1,0,0,nit,nthreads,tm,Mops,"Random numbers generated",verified,"3.0 structured","14 Jan 2020","(none)","(none)","-lm","(none)","(none)","(none)","randdp"); if (0 == 1) { printf("Total time: %f",(timer_read(1))); printf("Gaussian pairs: %f",(timer_read(2))); printf("Random numbers: %f",(timer_read(3))); } }
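Editor's note: the computational core of ep.c above is the acceptance-rejection loop that turns pairs of uniform deviates into Gaussian pairs and tallies them in concentric square annuli. Below is a compact, self-contained C++ sketch of just that kernel; std::mt19937_64 stands in for the benchmark's randlc/vranlc stream, so the verified reference sums above do not apply to it.

#include <cmath>
#include <cstdio>
#include <random>

int main()
{
    std::mt19937_64 rng(271828183ULL);           // seed echoes S, but NOT the randlc sequence
    std::uniform_real_distribution<double> uni(-1.0, 1.0);

    double q[10] = {0.0}, sx = 0.0, sy = 0.0;
    const long pairs = 1L << 16;                 // one EP "batch" (2*NK uniform numbers)

    for (long n = 0; n < pairs; ++n) {
        const double x1 = uni(rng), x2 = uni(rng);
        const double t = x1 * x1 + x2 * x2;
        if (t > 1.0 || t == 0.0) continue;       // reject points outside the unit disc
        const double f = std::sqrt(-2.0 * std::log(t) / t);
        const double gx = x1 * f, gy = x2 * f;   // independent N(0,1) deviates
        const int l = (int)std::fmax(std::fabs(gx), std::fabs(gy));
        if (l < 10) q[l] += 1.0;                 // concentric square annuli counts
        sx += gx;                                // sum of Xi
        sy += gy;                                // sum of Yi
    }
    for (int i = 0; i < 10; ++i) std::printf("%3d %15.0f\n", i, q[i]);
    std::printf("Sums = %.6e %.6e\n", sx, sy);
}

In the benchmark itself each of the np batches first jumps the 46-bit linear congruential randlc sequence to an independent starting seed, which is what makes the k-loop embarrassingly parallel under a reduction on sx, sy, and q.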
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. 
These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio,
                        P.ParenCount, P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to be
    /// a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding. All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.
/// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. 
void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;       // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind. This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched a specific position in the grammar, provide code-completion
  /// results based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? 
nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. 
/// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. enum class CompoundToken { /// A '(' '{' beginning a statement-expression. StmtExprBegin, /// A '}' ')' ending a statement-expression. StmtExprEnd, /// A '[' '[' beginning a C++11 or C2x attribute. AttrBegin, /// A ']' ']' ending a C++11 or C2x attribute. AttrEnd, /// A '::' '*' forming a C++ pointer-to-member declaration. MemberPtr, }; /// Check that a compound operator was written in a "sensible" way, and warn /// if not. void checkCompoundToken(SourceLocation FirstTokLoc, tok::TokenKind FirstTokKind, CompoundToken Op); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// Introduces zero or more scopes for parsing. The scopes will all be exited /// when the object is destroyed. 
class MultiParseScope { Parser &Self; unsigned NumScopes = 0; MultiParseScope(const MultiParseScope&) = delete; public: MultiParseScope(Parser &Self) : Self(Self) {} void Enter(unsigned ScopeFlags) { Self.EnterScope(ScopeFlags); ++NumScopes; } void Exit() { while (NumScopes) { Self.ExitScope(); --NumScopes; } } ~MultiParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); /// Re-enter the template scopes for a declaration that might be a template. unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. Balances (), [], and {} delimiter tokens while /// skipping. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); /// The location of the first statement inside an else that might /// have a missleading indentation. If there is no /// MisleadingIndentationChecker on an else active, this location is invalid. SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. 
struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); virtual void ParseLexedPragmas(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; void ParseLexedPragmas() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; IdentifierInfo *MacroII = nullptr; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; /// Contains the lexed tokens of a pragma with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. class LateParsedPragma : public LateParsedDeclaration { Parser *Self = nullptr; AccessSpecifier AS = AS_none; CachedTokens Toks; public: explicit LateParsedPragma(Parser *P, AccessSpecifier AS) : Self(P), AS(AS) {} void takeToks(CachedTokens &Cached) { Toks.swap(Cached); } const CachedTokens &toks() const { return Toks; } AccessSpecifier getAccessSpecifier() const { return AS; } void ParseLexedPragmas() override; }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). 
struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely-defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser *Self; /// Method - The method declaration. Decl *Method; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must to be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) {} /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. 
  /// The stack of classes that is currently being parsed. Nested and local
  /// classes will be pushed onto this stack when they are parsed, and
  /// removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
        : P(P), Popped(false),
          State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains template-specific information that has been parsed prior
  /// to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
        : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() {}

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
        : Kind(isSpecialization ? ExplicitSpecialization : Template),
          TemplateParams(TemplateParams),
          LastParameterListWasEmpty(lastParameterListWasEmpty) {}

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
        : Kind(ExplicitInstantiation), TemplateParams(nullptr),
          ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
          LastParameterListWasEmpty(false) {}

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  // In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
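  // For example, for the GNU asm label in
  //   int x asm("ext_x");
  // this parses 'asm("ext_x")', and EndLoc ends up at the ')'.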
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
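  /// For example, given the base type 'NSArray', this parses the trailing
  /// '<NSString *>' in
  /// \code
  ///   NSArray<NSString *> *strings;
  /// \endcode
  /// and produces the specialized type 'NSArray<NSString *>'.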
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. 
enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. 
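  /// For example:
  /// \code
  ///   float F = 1.0f;
  ///   unsigned Bits = __builtin_bit_cast(unsigned, F); // sizes must match
  /// \endcode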
ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator( llvm::function_ref<void(const Designation &)> CodeCompleteCB); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 SourceLocation *LParenLoc = nullptr,
                                 SourceLocation *RParenLoc = nullptr);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;

    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. 
NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
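  /// For example, while parsing
  /// \code
  ///   for (auto x : range) { }
  /// \endcode
  /// 'auto x' is first parsed as an ordinary declaration; only at the ':'
  /// does it turn out to be a for-range-declaration, and the colon location
  /// and range expression are recorded here.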
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);
  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified token
  /// is definitely a type-specifier. Return false if it isn't part of a type
  /// specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier that is not part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or
  /// we're not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
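  /// For example, inside a function body 'T(a);' declares a variable 'a' of
  /// type 'T' when 'T' names a type, but is a function-style cast (an
  /// expression statement) otherwise.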
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }

  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks whether the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is enclosed in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as an expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
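  // A typical use is therefore (sketch):
  //   TentativeParsingAction PA(*this);
  //   TPResult TPR = TryParseDeclarator(/*mayBeAbstract=*/true);
  //   PA.Revert(); // restore the token stream before acting on TPR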
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();

  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }

  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }

  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear in certain syntactic
  // locations which the standard permits but we don't support yet, for
  // example, attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
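  /// For example, for '__attribute__((format(printf, 1, 2)))' this parses
  /// the argument list '(printf, 1, 2)' and returns 3.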
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
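  /// For example, for '[[deprecated("use bar instead")]]' this parses the
  /// argument clause '("use bar instead")' after the attribute name has been
  /// consumed.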
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
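  /// For example:
  /// \code
  ///   __attribute__((opencl_unroll_hint(4)))
  ///   for (int i = 0; i < 64; ++i) { }
  /// \endcode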
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); /// Parses intelfpga:: and clang:: loop attributes if the language is SYCL bool MaybeParseSYCLLoopAttributes(ParsedAttributes &Attrs) { if (getLangOpts().SYCLIsDevice || getLangOpts().SYCLIsHost) return ParseSYCLLoopAttributes(Attrs); return true; } bool ParseSYCLLoopAttributes(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. 
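      // ActOnCXXEnterDeclaratorScope returns true on error; only remember
      // that the scope was entered when Sema succeeds.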
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. 
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

  /// Parse a `match` clause for an '#pragma omp declare variant'. Return true
  /// if there was an error.
  bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
                                         OMPTraitInfo *ParentTI);

  /// Parse clauses for '#pragma omp declare variant'.
  void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                     SourceLocation Loc);

  /// Parse 'omp [begin] assume[s]' directive.
  void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
                                   SourceLocation Loc);

  /// Parse 'omp end assumes' directive.
  void ParseOpenMPEndAssumesDirective(SourceLocation Loc);

  /// Parse clauses for '#pragma omp declare target'.
  DeclGroupPtrTy ParseOMPDeclareTargetClauses();

  /// Parse '#pragma omp end declare target'.
  void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
                                         SourceLocation Loc);

  /// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning
  /// if it is not the current token.
  void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);

  /// Check \p FoundKind against \p ExpectedKind; if they do not match, issue
  /// an error noting that the "end" matching the "begin" directive of kind
  /// \p BeginKind was not found. Finally, if the expected kind was found or
  /// if \p SkipUntilOpenMPEnd is set, skip ahead using the helper
  /// `skipUntilPragmaOpenMPEnd`.
  void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
                            OpenMPDirectiveKind ExpectedKind,
                            OpenMPDirectiveKind FoundKind,
                            SourceLocation MatchingLoc,
                            SourceLocation FoundLoc,
                            bool SkipUntilOpenMPEnd);

  /// Parses declarative OpenMP directives.
  DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
      AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
      bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
      Decl *TagDecl = nullptr);

  /// Parse 'omp declare reduction' construct.
  DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);

  /// Parses initializer for provided omp_priv declaration inside the reduction
  /// initializer.
  void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

  /// Parses 'omp declare mapper' directive.
  DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);

  /// Parses variable declaration in 'omp declare mapper' directive.
  TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                             DeclarationName &Name,
                                             AccessSpecifier AS = AS_none);

  /// Tries to parse the cast part of an OpenMP array shaping operation:
  /// '[' expression ']' { '[' expression ']' } ')'.
  bool tryParseOpenMPArrayShapingCastPart();

  /// Parses simple list of variables.
  ///
  /// \param Kind Kind of the directive.
  /// \param Callback Callback function to be called for the list elements.
  /// \param AllowScopeSpecifier true, if the variables can have fully
  /// qualified names.
  ///
  bool ParseOpenMPSimpleVarList(
      OpenMPDirectiveKind Kind,
      const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
          Callback,
      bool AllowScopeSpecifier);

  /// Parses declarative or executable directive.
  ///
  /// \param StmtCtx The context in which we're parsing the directive.
  StmtResult
  ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);

  /// Parses clause of kind \a CKind for directive of a kind \a DKind.
  ///
  /// \param DKind Kind of current directive.
  /// \param CKind Kind of current clause.
  /// \param FirstClause true, if this is the first clause of a kind \a CKind
  /// in current directive.
/// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. 
bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return 
false;
  }
  bool ParseModuleName(
      SourceLocation UseLoc,
      SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
      bool IsImport);

  //===--------------------------------------------------------------------===//
  // C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
  ExprResult ParseTypeTrait();

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
  void CodeCompleteNaturalLanguage() override;

  class GNUAsmQualifiers {
    unsigned Qualifiers = AQ_unspecified;

  public:
    enum AQ {
      AQ_unspecified = 0,
      AQ_volatile = 1,
      AQ_inline = 2,
      AQ_goto = 4,
    };
    static const char *getQualifierName(AQ Qualifier);
    bool setAsmQualifier(AQ Qualifier);
    inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
    inline bool isInline() const { return Qualifiers & AQ_inline; }
    inline bool isGoto() const { return Qualifiers & AQ_goto; }
  };
  bool isGCCAsmStatement(const Token &TokAfterAsm) const;
  bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
  GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
  bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};

} // end namespace clang

#endif
symv_c_csr_u_lo_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include<stdlib.h> static alphasparse_status_t symv_s_csr_u_lo_conj_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mule(y[i], beta); alpha_madde(y[i], alpha, x[i]); } ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT); for(ALPHA_INT i = 0; i < num_threads; i++) { y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT); memset(y_local[i], '\0', sizeof(ALPHA_Number) * m); } #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for(ALPHA_INT i = 0; i < m; ++i) { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_Number tmp; for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai) { const ALPHA_INT col = A->col_indx[ai]; if(col >= i) { continue; } else { alpha_setzero(tmp); cmp_conj(tmp, A->values[ai]); alpha_mul(tmp, alpha, tmp); alpha_madde(y_local[tid][col], tmp, x[i]); alpha_madde(y_local[tid][i], tmp, x[col]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for(ALPHA_INT row = 0; row < m; row++) for(ALPHA_INT i = 0; i < num_threads; i++) alpha_adde(y[row], y_local[i][row]); for(ALPHA_INT i = 0; i < num_threads; i++) { alpha_free(y_local[i]); } alpha_free(y_local); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { return symv_s_csr_u_lo_conj_omp(alpha, A, x, beta, y); }
Parallel Programming in C - Merge Sort.c
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"

/* OpenMP implementation example
   Details of implementation/tutorial can be found here:
   http://madhugnadig.com/articles/parallel-processing/2017/02/25/parallel-computing-in-c-using-openMP.html
*/

void mergesort(int a[],int i,int j);
void merge(int a[],int i1,int j1,int i2,int j2);

int main()
{
    int *a, num, i;

    if (scanf("%d",&num) != 1)
        return 1;
    a = (int *)malloc(sizeof(int) * num);
    if (a == NULL)
        return 1;
    for(i=0;i<num;i++)
        if (scanf("%d",&a[i]) != 1)
            return 1;

    mergesort(a, 0, num-1);

    printf("\nSorted array :\n");
    for(i=0;i<num;i++)
        printf("%d ",a[i]);

    free(a);
    return 0;
}

void mergesort(int a[],int i,int j)
{
    int mid;

    if(i<j)
    {
        mid=(i+j)/2;

        #pragma omp parallel sections
        {
            #pragma omp section
            {
                mergesort(a,i,mid);        //left recursion
            }
            #pragma omp section
            {
                mergesort(a,mid+1,j);      //right recursion
            }
        }

        merge(a,i,mid,mid+1,j);            //merging of two sorted sub-arrays
    }
}

void merge(int a[],int i1,int j1,int i2,int j2)
{
    /* scratch space sized to the span being merged; a fixed temp[1000]
       would overflow for inputs longer than 1000 elements */
    int *temp = (int *)malloc(sizeof(int) * (j2 - i1 + 1));
    int i,j,k;
    i=i1;    //beginning of the first list
    j=i2;    //beginning of the second list
    k=0;

    while(i<=j1 && j<=j2)    //while elements in both lists
    {
        if(a[i]<a[j])
            temp[k++]=a[i++];
        else
            temp[k++]=a[j++];
    }

    while(i<=j1)    //copy remaining elements of the first list
        temp[k++]=a[i++];

    while(j<=j2)    //copy remaining elements of the second list
        temp[k++]=a[j++];

    //Transfer elements from temp[] back to a[]
    for(i=i1,j=0;i<=j2;i++,j++)
        a[i]=temp[j];

    free(temp);
}
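/*
 * A possible refinement (an illustrative sketch, not part of the tutorial):
 * the recursive "parallel sections" above opens a nested parallel region at
 * every level of the recursion, which oversubscribes threads on large
 * inputs. OpenMP tasks with a depth cutoff express the same parallelism in
 * one team; mergesort_tasks and CUTOFF_DEPTH are names local to this sketch,
 * and merge() is the function defined above.
 */
#define CUTOFF_DEPTH 4

void mergesort_tasks(int a[], int i, int j, int depth)
{
    if (i < j)
    {
        int mid = (i + j) / 2;
        if (depth < CUTOFF_DEPTH)
        {
            #pragma omp task shared(a)
            mergesort_tasks(a, i, mid, depth + 1);
            #pragma omp task shared(a)
            mergesort_tasks(a, mid + 1, j, depth + 1);
            #pragma omp taskwait            // both halves done before merging
        }
        else    // below the cutoff, plain recursion is cheaper than tasking
        {
            mergesort_tasks(a, i, mid, depth + 1);
            mergesort_tasks(a, mid + 1, j, depth + 1);
        }
        merge(a, i, mid, mid + 1, j);
    }
}

/* Typical driver: one thread seeds the task tree, the whole team runs it.

       #pragma omp parallel
       #pragma omp single
       mergesort_tasks(a, 0, num - 1, 0);
*/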
sharing-1.c
/* { dg-do compile } */ /* { dg-require-effective-target tls } */ #include <stdlib.h> int thrglobalvar; #pragma omp threadprivate (thrglobalvar) int globalvar; const int constvar = 8; int foo (int x) { return x; } int bar (int *x) { return *x; } int main (void) { static int thrlocvar; #pragma omp threadprivate (thrlocvar) static int locvar; static int *p; int i, j, s, l; p = malloc (sizeof (int)); if (p == NULL) return 0; *p = 7; s = 6; l = 0; #pragma omp parallel for /* { dg-error "enclosing 'parallel'" } */ \ default (none) private (p) shared (s) for (i = 0; i < 64; i++) { int k = foo (0); /* Predetermined - private (automatic var declared */ k++; /* in scope of construct). */ thrglobalvar++; /* Predetermined - threadprivate. */ thrlocvar++; /* Predetermined - threadprivate. */ foo (i); /* Predetermined - private (omp for loop variable). */ foo (constvar); /* Predetermined - shared (const qualified type). */ foo (*p); /* *p predetermined - shared (heap allocated */ (*p)++; /* storage). */ bar (p); /* Explicitly determined - private. */ foo (s); /* Explicitly determined - shared. */ globalvar++; /* { dg-error "not specified in" } */ locvar++; /* { dg-error "not specified in" } */ l++; /* { dg-error "not specified in" } */ for (j = 0; j < 2; j++); /* { dg-error "not specified in" } */ } return 0; }
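/* For contrast, a minimal well-formed counterpart (an illustrative sketch,
   not part of the testsuite; guarded with #if 0 so the dg- diagnostics above
   stay unchanged): with default(none), every variable that is not
   predetermined must appear in an explicit clause, so naming globalvar,
   locvar and l removes the "not specified in" errors. */
#if 0
int
fixed_variant (int *p, int s)
{
  static int locvar;
  int i, l = 0;
  #pragma omp parallel for default (none) firstprivate (p) shared (s) \
			   shared (globalvar, locvar) reduction (+:l)
  for (i = 0; i < 64; i++)
    {
      foo (s);
      bar (p);		/* firstprivate, so each copy of p is initialized */
      globalvar = s;	/* explicitly shared (store left unsynchronized) */
      locvar = s;	/* explicitly shared (store left unsynchronized) */
      l++;		/* combined across threads by reduction (+:l) */
    }
  return l;
}
#endif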
pr59591-2.c
/* PR tree-optimization/59591 */ /* { dg-additional-options "-fopenmp-simd" } */ #ifndef CHECK_H #include "tree-vect.h" #endif extern void abort (void); long long int p[256], r[256], t[256]; int q[256]; __attribute__((noinline, noclone)) void foo (void) { int i; #pragma omp simd safelen(64) for (i = 0; i < 256; i++) if (r[i] > 32LL) t[i] = p[q[i]]; } __attribute__((noinline, noclone)) void bar (void) { int i; for (i = 0; i < 256; i++) { r[i] = ((i >> 2) & (1 << (i & 3))) ? 32 + i : 32 - i; q[i] = r[i] > 32 ? ((i * 7) % 256) : 258 + i; p[i] = i * 11; t[i] = i * 13; } foo (); for (i = 0; i < 256; i++) if ((i >> 2) & (1 << (i & 3))) { if (t[i] != ((i * 7) % 256) * 11) abort (); } else if (t[i] != i * 13) abort (); } #ifndef CHECK_H int main () { check_vect (); bar (); return 0; } #endif
GB_binop__lxor_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint8) // A*D function (colscale): GB (_AxD__lxor_uint8) // D*A function (rowscale): GB (_DxB__lxor_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint8) // C=scalar+B GB (_bind1st__lxor_uint8) // C=scalar+B' GB (_bind1st_tran__lxor_uint8) // C=A+scalar GB (_bind2nd__lxor_uint8) // C=A'+scalar GB (_bind2nd_tran__lxor_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lxor_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lxor_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lxor_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lxor_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lxor_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lxor_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
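//------------------------------------------------------------------------------
// sanity sketch (editorial illustration, not generated code): the LXOR
// operator used throughout this file treats uint8_t operands as booleans,
// so z = ((x != 0) != (y != 0)) reproduces the logical-XOR truth table.
// Guarded with #if 0; all names are local to the sketch.
//------------------------------------------------------------------------------

#if 0
#include <stdint.h>
#include <assert.h>

int main (void)
{
    // {x, y, expected z}
    uint8_t t [4][3] = { {0,0,0}, {0,7,1}, {5,0,1}, {5,7,0} } ;
    for (int k = 0 ; k < 4 ; k++)
    {
        uint8_t z = ((t [k][0] != 0) != (t [k][1] != 0)) ;
        assert (z == t [k][2]) ;
    }
    return (0) ;
}
#endif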
hopscotch_hash.c
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" static HYPRE_Int NearestPowerOfTwo( HYPRE_Int value ) { HYPRE_Int rc = 1; while (rc < value) { rc <<= 1; } return rc; } static void InitBucket(hypre_HopscotchBucket *b) { b->hopInfo = 0; b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY; } static void InitBigBucket(hypre_BigHopscotchBucket *b) { b->hopInfo = 0; b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void InitSegment(hypre_HopscotchSegment *s) { s->timestamp = 0; omp_init_lock(&s->lock); } static void DestroySegment(hypre_HopscotchSegment *s) { omp_destroy_lock(&s->lock); } #endif void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel) { s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1; if (inCapacity < s->segmentMask + 1) { inCapacity = s->segmentMask + 1; } //ADJUST INPUT ............................ HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096); HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1; s->bucketMask = adjInitCap - 1; HYPRE_Int i; //ALLOCATE THE SEGMENTS ................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= s->segmentMask; ++i) { InitSegment(&s->segments[i]); } #endif s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST); s->key = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST); s->hash = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel for #endif for (i = 0; i < num_buckets; ++i) { s->hopInfo[i] = 0; s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY; } } void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel) { s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1; if (inCapacity < s->segmentMask + 1) { inCapacity = s->segmentMask + 1; } //ADJUST INPUT ............................ HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096); HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1; s->bucketMask = adjInitCap - 1; HYPRE_Int i; //ALLOCATE THE SEGMENTS ................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= s->segmentMask; ++i) { InitSegment(&s->segments[i]); } #endif s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST); s->key = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST); s->hash = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel for #endif for (i = 0; i < num_buckets; ++i) { s->hopInfo[i] = 0; s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY; } } void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel) { m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1; if (inCapacity < m->segmentMask + 1) { inCapacity = m->segmentMask + 1; } //ADJUST INPUT ............................ 
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096); HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1; m->bucketMask = adjInitCap - 1; HYPRE_Int i; //ALLOCATE THE SEGMENTS ................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= m->segmentMask; i++) { InitSegment(&m->segments[i]); } #endif m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel for #endif for (i = 0; i < num_buckets; i++) { InitBucket(&m->table[i]); } } void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel) { m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1; if (inCapacity < m->segmentMask + 1) { inCapacity = m->segmentMask + 1; } //ADJUST INPUT ............................ HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096); HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1; m->bucketMask = adjInitCap - 1; HYPRE_Int i; //ALLOCATE THE SEGMENTS ................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= m->segmentMask; i++) { InitSegment(&m->segments[i]); } #endif m->table = hypre_TAlloc(hypre_BigHopscotchBucket, num_buckets, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel for #endif for (i = 0; i < num_buckets; i++) { InitBigBucket(&m->table[i]); } } void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s ) { hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST); hypre_TFree(s->key, HYPRE_MEMORY_HOST); hypre_TFree(s->hash, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int i; for (i = 0; i <= s->segmentMask; i++) { DestroySegment(&s->segments[i]); } hypre_TFree(s->segments, HYPRE_MEMORY_HOST); #endif } void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s ) { hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST); hypre_TFree(s->key, HYPRE_MEMORY_HOST); hypre_TFree(s->hash, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int i; for (i = 0; i <= s->segmentMask; i++) { DestroySegment(&s->segments[i]); } hypre_TFree(s->segments, HYPRE_MEMORY_HOST); #endif } void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m) { hypre_TFree(m->table, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int i; for (i = 0; i <= m->segmentMask; i++) { DestroySegment(&m->segments[i]); } hypre_TFree(m->segments, HYPRE_MEMORY_HOST); #endif } void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m) { hypre_TFree(m->table, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int i; for (i = 0; i <= m->segmentMask; i++) { DestroySegment(&m->segments[i]); } hypre_TFree(m->segments, HYPRE_MEMORY_HOST); #endif } HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len ) { /*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *ret_array = NULL; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel #endif { HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, n); HYPRE_Int cnt = 0; HYPRE_Int i; for (i = i_begin; i < i_end; i++) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { cnt++; } } hypre_prefix_sum(&cnt, len, 
prefix_sum_workspace); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp barrier #pragma omp master #endif { ret_array = hypre_TAlloc(HYPRE_Int, *len, HYPRE_MEMORY_HOST); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp barrier #endif for (i = i_begin; i < i_end; i++) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ret_array[cnt++] = s->key[i]; } } } hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); return ret_array; } HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len ) { /*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_BigInt *ret_array = NULL; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp parallel #endif { HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, n); HYPRE_Int cnt = 0; HYPRE_Int i; for (i = i_begin; i < i_end; i++) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { cnt++; } } hypre_prefix_sum(&cnt, len, prefix_sum_workspace); #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp barrier #pragma omp master #endif { ret_array = hypre_TAlloc(HYPRE_BigInt, *len, HYPRE_MEMORY_HOST); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH #pragma omp barrier #endif for (i = i_begin; i < i_end; i++) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ret_array[cnt++] = s->key[i]; } } } hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); return ret_array; }
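/*
 * The CopyToArray routines above use a two-pass parallel compaction pattern:
 * each thread counts its nonempty buckets, hypre_prefix_sum turns the
 * per-thread count into an exclusive write offset plus the total length, and
 * a second sweep over the same range writes the keys. A minimal plain-OpenMP
 * sketch of the same idea (illustrative only, guarded with #if 0; all names
 * are local to the sketch and it assumes at most 64 threads):
 */
#if 0
#include <stdio.h>
#include <omp.h>

int main(void)
{
   int in[12] = {0, 3, 0, 5, 7, 0, 2, 0, 0, 9, 4, 0};
   int n = 12, len = 0, out[12];
   int offsets[64];                          /* one slot per thread */

   #pragma omp parallel
   {
      int tid = omp_get_thread_num();
      int nth = omp_get_num_threads();
      int i_begin = n * tid / nth, i_end = n * (tid + 1) / nth;
      int cnt = 0, i;

      /* pass 1: count this thread's surviving elements */
      for (i = i_begin; i < i_end; i++) if (in[i] != 0) cnt++;
      offsets[tid] = cnt;
      #pragma omp barrier

      /* exclusive prefix sum over the per-thread counts */
      #pragma omp single
      {
         int t, sum = 0;
         for (t = 0; t < nth; t++) { int c = offsets[t]; offsets[t] = sum; sum += c; }
         len = sum;
      } /* implicit barrier publishes offsets and len */

      /* pass 2: every thread writes at its own offset */
      cnt = offsets[tid];
      for (i = i_begin; i < i_end; i++) if (in[i] != 0) out[cnt++] = in[i];
   }

   for (int i = 0; i < len; i++) printf("%d ", out[i]);   /* 3 5 7 2 9 4 */
   return 0;
}
#endif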
health.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /**********************************************************************************************/ /* OLDEN parallel C for dynamic structures: compiler, runtime system * and benchmarks * * Copyright (C) 1994-1996 by Anne Rogers (amr@cs.princeton.edu) and * Martin Carlisle (mcc@cs.princeton.edu) * ALL RIGHTS RESERVED. * * OLDEN is distributed under the following conditions: * * You may make copies of OLDEN for your own use and modify those copies. * * All copies of OLDEN must retain our names and copyright notice. * * You may not sell OLDEN or distribute OLDEN in conjunction with a * commercial product or service without the expressed written consent of * Anne Rogers and Martin Carlisle. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE. * */ /******************************************************************* * Health.c : Model of the Colombian Health Care System * *******************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "app-desc.h" #include "bots.h" #include "health.h" /* global variables */ int sim_level; int sim_cities; int sim_population_ratio; int sim_time; int sim_assess_time; int sim_convalescence_time; int32_t sim_seed; float sim_get_sick_p; float sim_convalescence_p; float sim_realloc_p; int sim_pid = 0; int res_population; int res_hospitals; int res_personnel; int res_checkin; int res_village; int res_waiting; int res_assess; int res_inside; float res_avg_stay; /********************************************************** * Handles math routines for health.c * **********************************************************/ float my_rand(int32_t *seed) { int32_t k; int32_t idum = *seed; idum ^= MASK; k = idum / IQ; idum = IA * (idum - k * IQ) - IR * k; idum ^= MASK; if (idum < 0) idum += IM; *seed = idum * IM; return (float) AM * idum; } /******************************************************************** * Handles lists. 
* ********************************************************************/ void addList(struct Patient **list, struct Patient *patient) { if (*list == NULL) { *list = patient; patient->back = NULL; patient->forward = NULL; } else { struct Patient *aux = *list; while (aux->forward != NULL) aux = aux->forward; aux->forward = patient; patient->back = aux; patient->forward = NULL; } } void removeList(struct Patient **list, struct Patient *patient) { #if 0 struct Patient *aux = *list; if (patient == NULL) return; while((aux != NULL) && (aux != patient)) aux = aux->forward; // Patient not found if (aux == NULL) return; // Removing patient if (aux->back != NULL) aux->back->forward = aux->forward; else *list = aux->forward; if (aux->forward != NULL) aux->forward->back = aux->back; #else if (patient->back != NULL) patient->back->forward = patient->forward; else *list = patient->forward; if (patient->forward != NULL) patient->forward->back = patient->back; #endif } /**********************************************************************/ void allocate_village( struct Village **capital, struct Village *back, struct Village *next, int level, int32_t vid) { int i, population, personnel; struct Village *current, *inext; struct Patient *patient; if (level == 0) *capital = NULL; else { personnel = (int) pow(2, level); population = personnel * sim_population_ratio; /* Allocate Village */ *capital = (struct Village *) malloc(sizeof(struct Village)); /* Initialize Village */ (*capital)->back = back; (*capital)->next = next; (*capital)->level = level; (*capital)->id = vid; (*capital)->seed = vid * (IQ + sim_seed); (*capital)->population = NULL; for(i=0;i<population;i++) { patient = (struct Patient *)malloc(sizeof(struct Patient)); patient->id = sim_pid++; patient->seed = (*capital)->seed; // changes seed for capital: my_rand(&((*capital)->seed)); patient->hosps_visited = 0; patient->time = 0; patient->time_left = 0; patient->home_village = *capital; addList(&((*capital)->population), patient); } /* Initialize Hospital */ (*capital)->hosp.personnel = personnel; (*capital)->hosp.free_personnel = personnel; (*capital)->hosp.assess = NULL; (*capital)->hosp.waiting = NULL; (*capital)->hosp.inside = NULL; (*capital)->hosp.realloc = NULL; omp_init_lock(&(*capital)->hosp.realloc_lock); // Create Cities (lower level) inext = NULL; for (i = sim_cities; i>0; i--) { allocate_village(&current, *capital, inext, level-1, (vid * (int32_t) sim_cities)+ (int32_t) i); inext = current; } (*capital)->forward = current; } } /**********************************************************************/ struct Results get_results(struct Village *village) { struct Village *vlist; struct Patient *p; struct Results t_res, p_res; t_res.hosps_number = 0.0; t_res.hosps_personnel = 0.0; t_res.total_patients = 0.0; t_res.total_in_village = 0.0; t_res.total_waiting = 0.0; t_res.total_assess = 0.0; t_res.total_inside = 0.0; t_res.total_hosps_v = 0.0; t_res.total_time = 0.0; if (village == NULL) return t_res; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; while(vlist) { p_res = get_results(vlist); t_res.hosps_number += p_res.hosps_number; t_res.hosps_personnel += p_res.hosps_personnel; t_res.total_patients += p_res.total_patients; t_res.total_in_village += p_res.total_in_village; t_res.total_waiting += p_res.total_waiting; t_res.total_assess += p_res.total_assess; t_res.total_inside += p_res.total_inside; t_res.total_hosps_v += p_res.total_hosps_v; t_res.total_time += p_res.total_time; vlist = vlist->next; } 
t_res.hosps_number     += 1.0;
   t_res.hosps_personnel  += village->hosp.personnel;

   // Patients in the village
   p = village->population;
   while (p != NULL)
   {
      t_res.total_patients   += 1.0;
      t_res.total_in_village += 1.0;
      t_res.total_hosps_v    += (float)(p->hosps_visited);
      t_res.total_time       += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: waiting
   p = village->hosp.waiting;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_waiting  += 1.0;
      t_res.total_hosps_v  += (float)(p->hosps_visited);
      t_res.total_time     += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: assess
   p = village->hosp.assess;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_assess   += 1.0;
      t_res.total_hosps_v  += (float)(p->hosps_visited);
      t_res.total_time     += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: inside
   p = village->hosp.inside;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_inside   += 1.0;
      t_res.total_hosps_v  += (float)(p->hosps_visited);
      t_res.total_time     += (float)(p->time);
      p = p->forward;
   }

   return t_res;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
void check_patients_inside(struct Village *village)
{
   struct Patient *list = village->hosp.inside;
   struct Patient *p;

   while (list != NULL)
   {
      p = list;
      list = list->forward;
      p->time_left--;
      if (p->time_left == 0)
      {
         village->hosp.free_personnel++;
         removeList(&(village->hosp.inside), p);
         addList(&(village->population), p);
      }
   }
}
/**********************************************************************/
void check_patients_assess_par(struct Village *village)
{
   struct Patient *list = village->hosp.assess;
   float rand;
   struct Patient *p;

   while (list != NULL)
   {
      p = list;
      list = list->forward;
      p->time_left--;

      if (p->time_left == 0)
      {
         rand = my_rand(&(p->seed));
         /* sim_convalescence_p % */
         if (rand < sim_convalescence_p)
         {
            rand = my_rand(&(p->seed));
            /* !sim_realloc_p % or root hospital */
            if (rand > sim_realloc_p || village->level == sim_level)
            {
               removeList(&(village->hosp.assess), p);
               addList(&(village->hosp.inside), p);
               p->time_left = sim_convalescence_time;
               p->time += p->time_left;
            }
            else /* move to upper level hospital !!! */
            {
               village->hosp.free_personnel++;
               removeList(&(village->hosp.assess), p);
               /* the target list belongs to the parent village, so take
                  the parent's lock while mutating it */
               omp_set_lock(&(village->back->hosp.realloc_lock));
               addList(&(village->back->hosp.realloc), p);
               omp_unset_lock(&(village->back->hosp.realloc_lock));
            }
         }
         else /* move to village */
         {
            village->hosp.free_personnel++;
            removeList(&(village->hosp.assess), p);
            addList(&(village->population), p);
         }
      }
   }
}
/**********************************************************************/
void check_patients_waiting(struct Village *village)
{
   struct Patient *list = village->hosp.waiting;
   struct Patient *p;

   while (list != NULL)
   {
      p = list;
      list = list->forward;
      if (village->hosp.free_personnel > 0)
      {
         village->hosp.free_personnel--;
         p->time_left = sim_assess_time;
         p->time += p->time_left;
         removeList(&(village->hosp.waiting), p);
         addList(&(village->hosp.assess), p);
      }
      else
      {
         p->time++;
      }
   }
}
/**********************************************************************/
void check_patients_realloc(struct Village *village)
{
   struct Patient *p, *s;

   while (village->hosp.realloc != NULL)
   {
      p = s = village->hosp.realloc;
      while (p != NULL)
      {
         if (p->id < s->id) s = p;
         p = p->forward;
      }
      removeList(&(village->hosp.realloc), s);
      put_in_hosp(&(village->hosp), s);
   }
}
/**********************************************************************/
void check_patients_population(struct Village *village)
{
   struct Patient *list = village->population;
   struct Patient *p;
   float rand;

   while (list != NULL)
   {
      p = list;
      list = list->forward;

      /* randomize in patient */
      rand = my_rand(&(p->seed));
      if (rand < sim_get_sick_p)
      {
         removeList(&(village->population), p);
         put_in_hosp(&(village->hosp), p);
      }
   }
}
/**********************************************************************/
void put_in_hosp(struct Hosp *hosp, struct Patient *patient)
{
   (patient->hosps_visited)++;

   if (hosp->free_personnel > 0)
   {
      hosp->free_personnel--;
      addList(&(hosp->assess), patient);
      patient->time_left = sim_assess_time;
      patient->time += patient->time_left;
   }
   else
   {
      addList(&(hosp->waiting), patient);
   }
}
/**********************************************************************/
#if defined (IF_CUTOFF)
void sim_village_par(struct Village *village)
{
   struct Village *vlist;

   // lowest level returns nothing
   // only for sim_village first call with village = NULL
   // recursive calls cannot occur
   if (village == NULL) return;

   /* Traverse village hierarchy (lower level first)*/
   vlist = village->forward;
   while(vlist)
   {
      #pragma omp task untied if((sim_level - village->level) < bots_cutoff_value)
      sim_village_par(vlist);
      vlist = vlist->next;
   }

   /* Uses lists v->hosp->inside, and v->return */
   check_patients_inside(village);

   /* Uses lists v->hosp->assess, v->hosp->inside, v->population
      and (v->back->hosp->realloc) !!! */
   check_patients_assess_par(village);

   /* Uses lists v->hosp->waiting, and v->hosp->assess */
   check_patients_waiting(village);

   #pragma omp taskwait

   /* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
   check_patients_realloc(village);

   /* Uses list v->population, v->hosp->assess and v->hosp->waiting */
   check_patients_population(village);
}
#elif defined (MANUAL_CUTOFF)
void sim_village_par(struct Village *village)
{
   struct Village *vlist;

   // lowest level returns nothing
   // only for sim_village first call with village = NULL
   // recursive calls cannot occur
   if (village == NULL) return;

   /* Traverse village hierarchy (lower level first)*/
   vlist = village->forward;
   if ((sim_level-village->level) < bots_cutoff_value)
   {
      while(vlist)
      {
         #pragma omp task untied
         sim_village_par(vlist);
         vlist = vlist->next;
      }
   }
   else
   {
      while(vlist)
      {
         sim_village_par(vlist);
         vlist = vlist->next;
      }
   }

   /* Uses lists v->hosp->inside, and v->return */
   check_patients_inside(village);

   /* Uses lists v->hosp->assess, v->hosp->inside, v->population
      and (v->back->hosp->realloc) !!! */
   check_patients_assess_par(village);

   /* Uses lists v->hosp->waiting, and v->hosp->assess */
   check_patients_waiting(village);

   if ((sim_level-village->level) < bots_cutoff_value)
   {
      #pragma omp taskwait
   }

   /* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
   check_patients_realloc(village);

   /* Uses list v->population, v->hosp->assess and v->hosp->waiting */
   check_patients_population(village);
}
#else
void sim_village_par(struct Village *village)
{
   struct Village *vlist;

   // lowest level returns nothing
   // only for sim_village first call with village = NULL
   // recursive calls cannot occur
   if (village == NULL) return;

   /* Traverse village hierarchy (lower level first)*/
   vlist = village->forward;
   while(vlist)
   {
      #pragma omp task untied
      sim_village_par(vlist);
      vlist = vlist->next;
   }

   /* Uses lists v->hosp->inside, and v->return */
   check_patients_inside(village);

   /* Uses lists v->hosp->assess, v->hosp->inside, v->population
      and (v->back->hosp->realloc) !!! */
*/ check_patients_assess_par(village); /* Uses lists v->hosp->waiting, and v->hosp->assess */ check_patients_waiting(village); #pragma omp taskwait /* Uses lists v->hosp->realloc, v->hosp->asses and v->hosp->waiting */ check_patients_realloc(village); /* Uses list v->population, v->hosp->asses and v->h->waiting */ check_patients_population(village); } #endif /**********************************************************************/ void my_print(struct Village *village) { struct Village *vlist; struct Patient *plist; if (village == NULL) return; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; while(vlist) { my_print(vlist); vlist = vlist->next; } plist = village->population; while (plist != NULL) { bots_debug("[pid:%d]",plist->id); plist = plist->forward; } bots_debug("[vid:%d]\n",village->id); } /**********************************************************************/ void read_input_data(char *filename) { FILE *fin; int res; if ((fin = fopen(filename, "r")) == NULL) { bots_message("Could not open sequence file (%s)\n", filename); exit (-1); } res = fscanf(fin,"%d %d %d %d %d %d %ld %f %f %f %d %d %d %d %d %d %d %d %f", &sim_level, &sim_cities, &sim_population_ratio, &sim_time, &sim_assess_time, &sim_convalescence_time, &sim_seed, &sim_get_sick_p, &sim_convalescence_p, &sim_realloc_p, &res_population, &res_hospitals, &res_personnel, &res_checkin, &res_village, &res_waiting, &res_assess, &res_inside, &res_avg_stay ); if ( res == EOF ) { bots_message("Bogus input file (%s)\n", filename); exit(-1); } fclose(fin); // Printing input data bots_message("\n"); bots_message("Number of levels = %d\n", (int) sim_level); bots_message("Cities per level = %d\n", (int) sim_cities); bots_message("Population ratio = %d\n", (int) sim_population_ratio); bots_message("Simulation time = %d\n", (int) sim_time); bots_message("Assess time = %d\n", (int) sim_assess_time); bots_message("Convalescence time = %d\n", (int) sim_convalescence_time); bots_message("Initial seed = %d\n", (int) sim_seed); bots_message("Get sick prob. = %f\n", (float) sim_get_sick_p); bots_message("Convalescence prob. = %f\n", (float) sim_convalescence_p); bots_message("Realloc prob. = %f\n", (float) sim_realloc_p); } int check_village(struct Village *top) { struct Results result = get_results(top); int answer = BOTS_RESULT_SUCCESSFUL; if (res_population != result.total_patients) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_hospitals != result.hosps_number) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_personnel != result.hosps_personnel) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_checkin != result.total_hosps_v) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_village != result.total_in_village) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_waiting != result.total_waiting) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_assess != result.total_assess) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_inside != result.total_inside) answer = BOTS_RESULT_UNSUCCESSFUL; bots_message("\n"); bots_message("Sim. 
Variables = expect / result\n"); bots_message("Total population = %6d / %6d people\n", (int) res_population, (int) result.total_patients); bots_message("Hospitals = %6d / %6d people\n", (int) res_hospitals, (int) result.hosps_number); bots_message("Personnel = %6d / %6d people\n", (int) res_personnel, (int) result.hosps_personnel); bots_message("Check-in's = %6d / %6d people\n", (int) res_checkin, (int) result.total_hosps_v); bots_message("In Villages = %6d / %6d people\n", (int) res_village, (int) result.total_in_village); bots_message("In Waiting List = %6d / %6d people\n", (int) res_waiting, (int) result.total_waiting); bots_message("In Assess = %6d / %6d people\n", (int) res_assess, (int) result.total_assess); bots_message("Inside Hospital = %6d / %6d people\n", (int) res_inside, (int) result.total_inside); bots_message("Average Stay = %6f / %6f u/time\n", (float) res_avg_stay,(float) result.total_time/result.total_patients); my_print(top); return answer; } /**********************************************************************/ void sim_village_main_par(struct Village *top) { long i; #pragma omp parallel #pragma omp single #pragma omp task untied for (i = 0; i < sim_time; i++) sim_village_par(top); }
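/* A minimal standalone sketch of the task-recursion idiom used by
 * sim_village_par() above: spawn one untied task per child of a node,
 * then taskwait before the phase that consumes the children's results.
 * All names below (struct node, subtree_sum) are illustrative and are
 * not part of BOTS. Compile with, e.g., gcc -fopenmp. */
#include <stdio.h>

struct node { int value; struct node *child; struct node *next; };

static long subtree_sum(struct node *n)
{
    long total = 0;
    if (n == NULL) return 0;
    /* One task per child, mirroring the while(vlist) loops above */
    for (struct node *c = n->child; c != NULL; c = c->next) {
        #pragma omp task untied shared(total)
        {
            long s = subtree_sum(c);
            #pragma omp atomic
            total += s;
        }
    }
    /* Children must complete before this level combines their results,
     * just as check_patients_realloc() runs only after the taskwait */
    #pragma omp taskwait
    return total + n->value;
}

int main(void)
{
    struct node leaf1 = { 1, NULL, NULL };
    struct node leaf2 = { 2, NULL, &leaf1 };
    struct node root  = { 4, &leaf2, NULL };
    long total = 0;
    #pragma omp parallel
    #pragma omp single
    total = subtree_sum(&root);
    printf("sum = %ld\n", total); /* expect 7 */
    return 0;
}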
convolution_1x1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt); } static void conv1x1s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const signed char* r0 = bottom_blob.channel(p); signed char* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { outptr[0] = r0[0]; outptr[1] = r0[2]; outptr[2] = r0[4]; outptr[3] = r0[6]; r0 += 8; outptr += 4; } for (; j + 1 < outw; j += 2) { outptr[0] = r0[0]; outptr[1] = r0[2]; r0 += 4; outptr += 2; } for (; j < outw; j++) { outptr[0] = r0[0]; r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt); }
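/* A standalone sketch of the stride-2 "shrink" trick used by
 * conv1x1s2_int8_neon() above: gather every other pixel of every other
 * row into a dense buffer so the stride-1 1x1 sgemm path can be reused
 * unchanged. Sizes here are illustrative; tailstep = w - 2*outw + w
 * skips the tail of the current row plus the entire next row. */
#include <stdio.h>

int main(void)
{
    enum { W = 5, OUTW = 3, OUTH = 3 };
    signed char src[W * W], dst[OUTW * OUTH];
    for (int i = 0; i < W * W; i++) src[i] = (signed char)i;

    const int tailstep = W - 2 * OUTW + W; /* 5 - 6 + 5 = 4 */
    const signed char *r0 = src;
    signed char *out = dst;
    for (int i = 0; i < OUTH; i++) {
        for (int j = 0; j < OUTW; j++) {
            *out++ = r0[0]; /* keep one pixel, skip one */
            r0 += 2;
        }
        r0 += tailstep;    /* land on the start of row 2*(i+1) */
    }
    /* Prints: 0 2 4 / 10 12 14 / 20 22 24 */
    for (int i = 0; i < OUTH; i++) {
        for (int j = 0; j < OUTW; j++) printf("%d ", dst[i * OUTW + j]);
        printf("\n");
    }
    return 0;
}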
nco_rth_utl.c
/* $Header$ */ /* Purpose: Arithmetic controls and utilities */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License with exceptions described in the LICENSE file */ #include "nco_rth_utl.h" /* Arithmetic controls and utilities */ nco_rth_prc_rnk_enm /* [enm] Ranked precision of arithmetic type */ nco_rth_prc_rnk /* [fnc] Rank precision of arithmetic type */ (const nc_type nco_typ) /* I [enm] netCDF type of operand */ { /* Purpose: Ranked precision of arithmetic type */ switch(nco_typ){ case NC_FLOAT: return nco_rth_prc_rnk_float; case NC_DOUBLE: return nco_rth_prc_rnk_double; case NC_INT: return nco_rth_prc_rnk_int; case NC_SHORT: return nco_rth_prc_rnk_short; case NC_CHAR: return nco_rth_prc_rnk_char; case NC_BYTE: return nco_rth_prc_rnk_byte; case NC_UBYTE: return nco_rth_prc_rnk_ubyte; case NC_USHORT: return nco_rth_prc_rnk_ushort; case NC_UINT: return nco_rth_prc_rnk_uint; case NC_INT64: return nco_rth_prc_rnk_int64; case NC_UINT64: return nco_rth_prc_rnk_uint64; case NC_STRING: return nco_rth_prc_rnk_string; default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* Some compilers, e.g., SGI cc, need return statement to end non-void functions */ return (nco_rth_prc_rnk_enm)0; } /* end nco_rth_prc_rnk() */ void nco_opr_nrm /* [fnc] Normalization of arithmetic operations for ncra/nces */ (const int nco_op_typ, /* I [enm] Operation type */ const int nbr_var_prc, /* I [nbr] Number of processed variables */ X_CST_PTR_CST_PTR_Y(var_sct,var_prc), /* I [sct] Variables in input file */ X_CST_PTR_CST_PTR_Y(var_sct,var_prc_out), /* I/O [sct] Variables in output file */ const char * const rec_nm_fll, /* I [sng] Full name of record dimension */ const trv_tbl_sct * const trv_tbl) /* I [sct] Traversal table */ { /* Purpose: Normalize appropriate ncra/nces operation (avg, min, max, ttl, ...) 
on operands Values of var_prc are not altered but are not const because missing values are cast Values of var_prc_out are altered (i.e., normalized) */ int idx=int_CEWI; int nbr_var_prc_cpy; int nco_op_typ_cpy; nco_op_typ_cpy=nco_op_typ; nbr_var_prc_cpy=nbr_var_prc; #ifdef _OPENMP #pragma omp parallel for private(idx) shared(nbr_var_prc_cpy,nco_op_typ_cpy,var_prc,var_prc_out) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc_cpy;idx++){ /* In normalizations over record dimension, only normalize those variables that contain current record dimension */ if(rec_nm_fll){ nco_bool flg_skp=nco_skp_var(var_prc[idx],rec_nm_fll,trv_tbl); if(flg_skp) continue; } /* !rec_nm_fll */ if(var_prc[idx]->is_crd_var){ /* Return linear averages of coordinates unless computing extrema Prevent coordinate variables from encountering nco_var_nrm_sdn() */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val); }else{ /* !var_prc[idx]->is_crd_var */ switch(nco_op_typ_cpy){ case nco_op_avg: /* Normalize sum by tally to create mean */ case nco_op_mebs: /* Normalize sum by tally to create mean */ case nco_op_sqrt: /* Normalize sum by tally to create mean */ case nco_op_sqravg: /* Normalize sum by tally to create mean */ case nco_op_rms: /* Normalize sum of squares by tally to create mean square */ case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val); break; case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */ (void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val); break; case nco_op_min: /* Minimum is already in buffer, do nothing */ case nco_op_max: /* Maximum is already in buffer, do nothing */ case nco_op_mibs: /* Minimum absolute value is already in buffer, do nothing */ case nco_op_mabs: /* Maximum absolute value is already in buffer, do nothing */ break; case nco_op_tabs: /* Total absolute value is already in buffer, stuff missing values into elements with zero tally */ case nco_op_ttl: /* Total is already in buffer, stuff missing values into elements with zero tally */ (void)nco_var_tll_zro_mss_val(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val); break; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* A few operations require additional processing */ switch(nco_op_typ_cpy) { case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */ case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */ case nco_op_sqrt: /* Take root of mean to create root mean */ (void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val); break; case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */ (void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val); break; case nco_op_avg: case nco_op_ttl: case nco_op_min: case nco_op_max: case nco_op_mibs: case nco_op_mabs: case nco_op_mebs: case nco_op_tabs: 
case nco_op_avgsqr: break; default: nco_dfl_case_generic_err(); break; } /* end switch */ } /* !var_prc[idx]->is_crd_var */ } /* end (OpenMP parallel for) loop over variables */ } /* end nco_opr_nrm() */ void nco_opr_drv /* [fnc] Intermediate control of arithmetic operations for ncra/nces */ (const long idx_rec, /* I [idx] Index of record (ncra), file (ncfe), or group (ncge) in current operation */ const int nco_op_typ, /* I [enm] Operation type */ const var_sct * const var_prc, /* I [sct] Variable in input file */ var_sct * const var_prc_out) /* I/O [sct] Variable in output file */ { /* Purpose: Perform appropriate ncra/nces operation (avg, min, max, ttl, ...) on operands nco_opr_drv() is called within the record loop of ncra, and within file loop of nces These operations perform some, but not all, of necessary operations for each procedure Most arithmetic operations require additional procedures such as normalization be performed after all files/records have been processed Some operations require special care at initialization This determination is based on the idx_rec variable When idx_rec == 0, these operations may perform special initializations The exact numeric value of idx_rec does not matter What matters is whether it is zero or non-zero */ /* NCO's paradigm is that coordinate variables represent grid axes Reducing such grids to a single-value must be done The most representative value of the grid is the average The total, min, max, rms, etc. of the grid usually makes no sense Users are most interested in the mean grid coordinate 20130112: The same logic applies to CF-style coordinates, e.g., to variables matching CF "bounds", "climatology", and "coordinates" conventions */ if(var_prc->is_crd_var){ (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); return; } /* !var_prc->is_crd_var */ /* var_prc_out->type and var_prc->type should be equal and thus interchangeable var_prc_out->sz and var_prc->sz should be equal and thus interchangeable */ switch (nco_op_typ){ case nco_op_min: /* Minimum */ /* On first loop, simply copy variables from var_prc to var_prc_out */ if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_min_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val); break; case nco_op_max: /* Maximum */ /* On first loop, simply copy variables from var_prc to var_prc_out */ if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_max_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val); break; case nco_op_mabs: /* Maximum absolute value */ /* Always take the absolute value of the fresh input Then, on first loop, copy variable from var_prc to var_prc_out like min and max Following loops, do comparative maximum after taking absolute */ (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val); if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_max_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val); break; case nco_op_mebs: /* Mean absolute value */ /* Always take the absolute value of the fresh input Every loop add and increment tally like avg, sqrt, sqravg */ 
(void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val); (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); break; case nco_op_mibs: /* Minimum absolute value */ /* Always take the absolute value of the fresh input Then, on first loop, copy variable from var_prc to var_prc_out like min and max Following loops, do comparative minimum after taking absolute value */ (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val); if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_min_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val); break; case nco_op_tabs: /* Total absolute value */ /* Same as ttl but take absolute first */ (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val); if(idx_rec == 0) (void)nco_var_copy_tll(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->val,var_prc_out->val); else (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); break; case nco_op_ttl: /* Total */ /* NB: Copying input to output on first loop for nco_op_ttl, in similar manner to nco_op_[max/min], can work However, copying with nco_var_copy() would not change the tally variable, leaving it equal to zero Then an extra step would be necessary to set tally equal to one where missing values were not present Otherwise, e.g., ensemble averages of one file would never have non-zero tallies Hence, use special nco_var_copy_tll() function to copy and change tally only in first loop iteration This way, tally is self-consistent with var_prc_out at all times Moreover, running total must never be set to missing_value, because subsequent additions (with nco_var_add_tll_ncra()) only check new addend (not running sum) against missing value.
Hence (as of 20120521) nco_var_copy_tll() specifically resets sum to zero rather than to missing value Parent function (e.g., ncra.c) must post-process ttl buffers nco_op_ttl with nco_var_tll_zro_mss_val() */ if(idx_rec == 0) (void)nco_var_copy_tll(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->val,var_prc_out->val); else (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); break; case nco_op_avg: /* Average */ case nco_op_sqrt: /* Square root will produce the square root of the mean */ case nco_op_sqravg: /* Square of the mean */ /* These operations all require subsequent normalization, where degenerate tallies are accounted for Thus, they all call nco_var_add_tll_ncra() every iteration, without special treatment on first iteration */ (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); break; case nco_op_rms: /* Root mean square */ case nco_op_rmssdn: /* Root mean square normalized by N-1 */ case nco_op_avgsqr: /* Mean square */ /* Square values in var_prc first */ nco_var_mlt(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc->val); /* Sum the squares */ (void)nco_var_add_tll_ncra(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val); break; default: nco_dfl_case_generic_err(); break; /* [enm] Nil or undefined operation type */ } /* end switch */ } /* end nco_opr_drv() */ const char * /* O [enm] Arithmetic operation */ nco_op_typ_cf_sng /* [fnc] Convert arithmetic operation type enum to string */ (const int nco_op_typ) /* I [enm] Arithmetic operation type */ { /* Purpose: Convert arithmetic operation type enum to string for use in CF Cell Methods http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/cf-conventions.html#cell-methods */ switch(nco_op_typ){ case nco_op_avg: return "mean"; break; /* [enm] Average */ case nco_op_min: return "minimum"; break; /* [enm] Minimum value */ case nco_op_max: return "maximum"; break; /* [enm] Maximum value */ case nco_op_ttl: return "sum"; break; /* [enm] Linear sum */ case nco_op_tabs: return "sum_absolute_value"; break; /* [enm] Total absolute value */ case nco_op_mabs: return "maximum_absolute_value"; break; /* [enm] Maximum absolute value */ case nco_op_mebs: return "mean_absolute_value"; break; /* [enm] Mean absolute value */ case nco_op_mibs: return "minimum_absolute_value"; break; /* [enm] Minimum absolute value */ case nco_op_sqravg: return "square_of_mean"; break; /* [enm] Square of mean */ case nco_op_avgsqr: return "variance"; break; /* [enm] Mean of sum of squares */ case nco_op_sqrt: return "square_root_of_mean"; break; /* [enm] Square root of mean */ case nco_op_rms: return "root_mean_square"; break; /* [enm] Root-mean-square (normalized by N) */ case nco_op_rmssdn: return "root_mean_square_nm1"; break; /* [enm] Root-mean-square normalized by N-1 */ case nco_op_add: case nco_op_sbt: case nco_op_mlt: case nco_op_dvd: case nco_op_nil: default: nco_dfl_case_generic_err(); return "BROKEN"; /* CEWI */ break; /* [enm] Nil or undefined operation type */ } /* end switch */ } /* end nco_op_typ_cf_sng() */ int /* O [enm] Arithmetic operation */ nco_op_typ_get /* [fnc] Convert user-specified operation into operation key */ (const
char * const nco_op_sng) /* I [sng] User-specified operation */ { /* Purpose: Process '-y' command line argument Convert user-specified string to enumerated operation type */ const char fnc_nm[]="nco_op_typ_get()"; /* [sng] Function name */ char *nco_prg_nm; /* [sng] Program name */ int nco_prg_id; /* [enm] Program ID */ nco_prg_nm=nco_prg_nm_get(); /* [sng] Program name */ nco_prg_id=nco_prg_id_get(); /* [enm] Program ID */ if(nco_op_sng == NULL){ /* If nco_op_typ_get() is called when user-specified option string is NULL, then operation type may be implied by program name itself */ if(!strcmp(nco_prg_nm,"ncadd")) return nco_op_add; if(!strcmp(nco_prg_nm,"mpncbo")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"mpncdiff")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"ncbo")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"ncdiff")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"ncsub")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"ncsubtract")) return nco_op_sbt; if(!strcmp(nco_prg_nm,"ncmult")) return nco_op_mlt; if(!strcmp(nco_prg_nm,"ncmultiply")) return nco_op_mlt; if(!strcmp(nco_prg_nm,"ncdivide")) return nco_op_dvd; (void)fprintf(stderr,"%s: ERROR %s reports empty user-specified operation string in conjunction with unknown or ambiguous executable name %s\n",nco_prg_nm,fnc_nm,nco_prg_nm); nco_exit(EXIT_FAILURE); } /* endif */ if(!strcmp(nco_op_sng,"avg") || !strcmp(nco_op_sng,"average") || !strcmp(nco_op_sng,"mean")) return nco_op_avg; if(!strcmp(nco_op_sng,"avgsqr")) return nco_op_avgsqr; if(!strcmp(nco_op_sng,"mabs") || !strcmp(nco_op_sng,"maximum_absolute_value")) return nco_op_mabs; if(!strcmp(nco_op_sng,"mebs") || !strcmp(nco_op_sng,"mean_absolute_value")) return nco_op_mebs; if(!strcmp(nco_op_sng,"mibs") || !strcmp(nco_op_sng,"minimum_absolute_value")) return nco_op_mibs; if(!strcmp(nco_op_sng,"max") || !strcmp(nco_op_sng,"maximum")) return nco_op_max; if(!strcmp(nco_op_sng,"min") || !strcmp(nco_op_sng,"minimum")) return nco_op_min; if(!strcmp(nco_op_sng,"rms") || !strcmp(nco_op_sng,"root-mean-square")) return nco_op_rms; if(!strcmp(nco_op_sng,"rmssdn")) return nco_op_rmssdn; if(!strcmp(nco_op_sng,"sqravg")) return nco_op_sqravg; if(!strcmp(nco_op_sng,"sqrt") || !strcmp(nco_op_sng,"square-root")) return nco_op_sqrt; if(!strcmp(nco_op_sng,"total") || !strcmp(nco_op_sng,"ttl") || !strcmp(nco_op_sng,"sum")) return nco_op_ttl; if(!strcmp(nco_op_sng,"tabs") || !strcmp(nco_op_sng,"ttlabs") || !strcmp(nco_op_sng,"sumabs")) return nco_op_tabs; if(!strcmp(nco_op_sng,"add") || !strcmp(nco_op_sng,"+") || !strcmp(nco_op_sng,"addition")) return nco_op_add; if(!strcmp(nco_op_sng,"sbt") || !strcmp(nco_op_sng,"-") || !strcmp(nco_op_sng,"dff") || !strcmp(nco_op_sng,"diff") || !strcmp(nco_op_sng,"sub") || !strcmp(nco_op_sng,"subtract") || !strcmp(nco_op_sng,"subtraction")) return nco_op_sbt; if(!strcmp(nco_op_sng,"dvd") || !strcmp(nco_op_sng,"/") || !strcmp(nco_op_sng,"divide") || !strcmp(nco_op_sng,"division")) return nco_op_dvd; if(!strcmp(nco_op_sng,"mlt") || !strcmp(nco_op_sng,"*") || !strcmp(nco_op_sng,"mult") || !strcmp(nco_op_sng,"multiply") || !strcmp(nco_op_sng,"multiplication")) return nco_op_mlt; (void)fprintf(stderr,"%s: ERROR %s reports unknown user-specified operation type \"%s\"\n",nco_prg_nm,fnc_nm,nco_op_sng); (void)fprintf(stderr,"%s: HINT Valid operation type (op_typ) choices:\n",nco_prg_nm); if(nco_prg_id == ncbo) (void)fprintf(stderr,"addition: add,+,addition\nsubtraction: sbt,-,dff,diff,sub,subtract,subtraction\nmultiplication: mlt,*,mult,multiply,multiplication\ndivision: 
dvd,/,divide,division\n"); else (void)fprintf(stderr,"min or minimum, max or maximum, mabs or maximum_absolute_value, mebs or mean_absolute_value, mibs or minimum_absolute_value, tabs or ttlabs or sumabs, ttl or total or sum, avg or average or mean, sqrt or square-root, sqravg, avgsqr, rms or root-mean-square, rmssdn\n"); nco_exit(EXIT_FAILURE); return False; /* Statement should not be reached */ } /* end nco_op_typ_get() */ int /* O [enm] Relational operation */ nco_op_prs_rlt /* [fnc] Convert Fortran abbreviation for relational operator into NCO operation key */ (const char * const op_sng) /* I [sng] Fortran representation of relational operator */ { /* Purpose: Convert Fortran abbreviation for relational operator into NCO operation key */ /* Classify the relation */ if(!strcmp(op_sng,"eq")){ return nco_op_eq; }else if(!strcmp(op_sng,"ne")){ return nco_op_ne; }else if(!strcmp(op_sng,"lt")){ return nco_op_lt; }else if(!strcmp(op_sng,"gt")){ return nco_op_gt; }else if(!strcmp(op_sng,"le")){ return nco_op_le; }else if(!strcmp(op_sng,"ge")){ return nco_op_ge; }else{ (void)fprintf(stdout,"%s: ERROR %s not registered in nco_op_prs_rlt()\n",nco_prg_nm_get(),op_sng); nco_exit(EXIT_FAILURE); } /* end else */ /* Some compilers, e.g., SGI cc, need return statement to end non-void functions */ return False; /* Statement should not be reached */ } /* end nco_op_prs_rlt() */ void vec_set /* [fnc] Fill every value of first operand with value of second operand */ (const nc_type type, /* I [enm] netCDF type of operand */ const long sz, /* I [nbr] size (in elements) of operand */ ptr_unn op1, /* I [sct] Values of first operand */ const double op2) /* I [frc] Value to fill vector with */ { /* Purpose: Fill every value of first operand with value of second operand */ long idx; /* Typecast pointer to values before access */ (void)cast_void_nctype(type,&op1); switch(type){ case NC_FLOAT: for(idx=0;idx<sz;idx++) op1.fp[idx]=(float)op2; break; case NC_DOUBLE: for(idx=0;idx<sz;idx++) op1.dp[idx]=op2; break; case NC_INT: for(idx=0;idx<sz;idx++) op1.ip[idx]=(nco_int)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_SHORT: for(idx=0;idx<sz;idx++) op1.sp[idx]=(nco_short)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_USHORT: for(idx=0;idx<sz;idx++) op1.usp[idx]=(nco_ushort)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_UINT: for(idx=0;idx<sz;idx++) op1.uip[idx]=(nco_uint)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_INT64: for(idx=0;idx<sz;idx++) op1.i64p[idx]=(nco_int64)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_UINT64: for(idx=0;idx<sz;idx++) op1.ui64p[idx]=(nco_uint64)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_BYTE: for(idx=0;idx<sz;idx++) op1.bp[idx]=(nco_byte)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_UBYTE: for(idx=0;idx<sz;idx++) op1.ubp[idx]=(nco_ubyte)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_CHAR: break; /* Do nothing */ case NC_STRING: break; /* Do nothing */ default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* NB: it is not necessary to un-typecast pointers to values after access because we have only operated on local copies of them.
*/ } /* end vec_set() */ void nco_zero_long /* [fnc] Zero all values of long array */ (const long sz, /* I [nbr] Size (in elements) of operand */ long * restrict const op1) /* I/O [nbr] Array to be zeroed */ { /* Purpose: Zero all values of long array */ /* Presumably this old method used until 20050321, and then again after 20120330, is slower than memset() because of pointer de-referencing. However, it does have the virtue of being correct. */ if(op1 == NULL){ (void)fprintf(stdout,"%s: ERROR nco_zero_long() asked to zero NULL pointer\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* endif */ size_t sz_byt; /* [B] Number of bytes in variable buffer */ sz_byt=(size_t)sz*sizeof(long); (void)memset((void *)op1,0,sz_byt); } /* end nco_zero_long() */ void nco_set_long /* [fnc] Set all values of long array */ (const long sz, /* I [nbr] Size (in elements) of operand */ const long val, /* I [] Number to set array to */ long * restrict const op1) /* I/O [nbr] Array to be set */ { /* Purpose: Set all values of long array to input value */ long idx; if(op1 == NULL){ (void)fprintf(stdout,"%s: ERROR nco_set_long() asked to set NULL pointer\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* endif */ for(idx=0;idx<sz;idx++) op1[idx]=val; } /* end nco_set_long() */
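/* A minimal sketch of the two-phase accumulate/normalize pattern that
 * nco_opr_drv() and nco_opr_nrm() above implement, shown for rms:
 * square and sum each record as it streams by (driver phase), then
 * divide by the tally and take the square root (normalization phase).
 * All names below are illustrative; real NCO operates on typed ptr_unn
 * buffers with missing-value and weight handling. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double rec[3][4] = { /* 3 records of 4 elements each */
        {1.0, 2.0, 3.0, 4.0},
        {2.0, 3.0, 4.0, 5.0},
        {3.0, 4.0, 5.0, 6.0},
    };
    double sum_sqr[4] = {0.0};
    long tally[4] = {0};
    int r, i;

    /* Driver phase, cf. nco_op_rms in nco_opr_drv(): nco_var_mlt()
     * squares the record, nco_var_add_tll_ncra() sums it with a tally */
    for (r = 0; r < 3; r++)
        for (i = 0; i < 4; i++) {
            sum_sqr[i] += rec[r][i] * rec[r][i];
            tally[i]++;
        }

    /* Normalization phase, cf. nco_op_rms in nco_opr_nrm():
     * nco_var_nrm() divides by tally, nco_var_sqrt() takes the root */
    for (i = 0; i < 4; i++)
        printf("rms[%d] = %g\n", i, sqrt(sum_sqr[i] / (double)tally[i]));

    return 0;
}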
blake2sp.c
/* BLAKE2 reference source code package - optimized C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 8 /* blake2sp_init_param defaults to setting the expecting output length from the digest_length parameter block field. In some cases, however, we do not want this, as the output length of these instances is given by inner_length instead. */ static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P ) { int err = blake2s_init_param(S, P); S->outlen = P->inner_length; return err; } static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint64_t offset ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = offset; P->xof_length = 0; P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2sp_init_leaf_param( S, P ); } static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = 0; P->xof_length = 0; P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2s_init_param( S, P ); } int blake2sp_init( blake2sp_state *S, size_t outlen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen ) { const unsigned char * in = (const unsigned char *)pin; size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; size_t i; 
if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2sp_final( blake2sp_state *S, void *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; size_t i; if(out == NULL || outlen < S->outlen) { return -1; } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2S_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES; if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES; blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left ); } blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES ); } for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( S->R, out, S->outlen ); } int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; size_t i; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0) return -1; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( keylen > BLAKE2S_KEYBYTES ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */ if( keylen > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > i * BLAKE2S_BLOCKBYTES ) { const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? 
left : BLAKE2S_BLOCKBYTES; blake2s_update( S[i], in__, len ); } blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, outlen, keylen ) < 0 ) return -1; FS->last_node = 1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( FS, out, outlen ); } #if defined(BLAKE2SP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( void ) { uint8_t key[BLAKE2S_KEYBYTES]; uint8_t buf[BLAKE2_KAT_LENGTH]; size_t i, step; for( i = 0; i < BLAKE2S_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; /* Test simple API */ for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2S_OUTBYTES]; blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES ); if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) ) { goto fail; } } /* Test streaming API */ for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) { for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) { uint8_t hash[BLAKE2S_OUTBYTES]; blake2sp_state S; uint8_t * p = buf; size_t mlen = i; int err = 0; if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) { goto fail; } while (mlen >= step) { if ( (err = blake2sp_update(&S, p, step)) < 0 ) { goto fail; } mlen -= step; p += step; } if ( (err = blake2sp_update(&S, p, mlen)) < 0) { goto fail; } if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) { goto fail; } if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) { goto fail; } } } puts( "ok" ); return 0; fail: puts("error"); return -1; } #endif
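/* Example driver for the one-shot blake2sp() API defined above: hash a
 * short unkeyed message and print the digest in hex. Illustrative only;
 * compile and link together with blake2sp.c, blake2s.c, and the BLAKE2
 * headers. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "blake2.h"

int main(void)
{
    const char msg[] = "hello, blake2sp";
    uint8_t digest[BLAKE2S_OUTBYTES];
    size_t i;

    /* NULL key and keylen 0 select unkeyed hashing */
    if (blake2sp(digest, sizeof digest, msg, strlen(msg), NULL, 0) < 0) {
        fprintf(stderr, "blake2sp failed\n");
        return 1;
    }
    for (i = 0; i < sizeof digest; i++) printf("%02x", digest[i]);
    printf("\n");
    return 0;
}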
par_csr_matop.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /* RDF: The following prototype already exists in _hypre_parcsr_ls.h, so * something needs to be reorganized here.*/ #ifdef __cplusplus extern "C" { #endif hypre_CSRMatrix * hypre_ExchangeRAPData( hypre_CSRMatrix *RAP_int, hypre_ParCSRCommPkg *comm_pkg_RT); /* reference seems necessary to prevent a problem with the "headers" script... */ #ifdef __cplusplus } #endif /* The following function was formerly part of hypre_ParMatmul but was removed so it can also be used for multiplication of Boolean matrices */ void hypre_ParMatmul_RowSizes( HYPRE_Int ** C_diag_i, HYPRE_Int ** C_offd_i, /*HYPRE_Int ** B_marker,*/ HYPRE_Int * A_diag_i, HYPRE_Int * A_diag_j, HYPRE_Int * A_offd_i, HYPRE_Int * A_offd_j, HYPRE_Int * B_diag_i, HYPRE_Int * B_diag_j, HYPRE_Int * B_offd_i, HYPRE_Int * B_offd_j, HYPRE_Int * B_ext_diag_i, HYPRE_Int * B_ext_diag_j, HYPRE_Int * B_ext_offd_i, HYPRE_Int * B_ext_offd_j, HYPRE_Int * map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int i1, i2, i3, jj2, jj3; HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int ii, size, rest; /* First pass begins here. Computes sizes of C rows. 
Arrays computed: C_diag_i, C_offd_i, B_marker Arrays needed: (11, all HYPRE_Int*) A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_i, B_ext_j, col_map_offd_B Scalars computed: C_diag_size, C_offd_size Scalars needed: num_rows_diag_A, num_cols_offd_A, allsquare, first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B */ *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, HYPRE_MEMORY_SHARED); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, HYPRE_MEMORY_SHARED); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ size = num_rows_diag_A/num_threads; rest = num_rows_diag_A - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3) #endif /*for (ii=0; ii < num_threads; ii++)*/ { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST); for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++) B_marker[i1] = -1; for (i1 = ns; i1 < ne; i1++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if ( allsquare ) { B_marker[i1] = jj_count_diag; jj_count_diag++; } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. *--------------------------------------------------------------------*/ (*C_diag_i)[i1] = jj_row_begin_diag; (*C_offd_i)[i1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { (*C_diag_i)[i1] += jj_count_diag; (*C_offd_i)[i1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } } /* end parallel loop */ /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns * the product in ParCSRMatrix C * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. 
*--------------------------------------------------------------------------*/ hypre_ParCSRMatrix *hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt last_col_diag_B; HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C=NULL; hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_Int C_diag_size; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_BigInt n_rows_A, n_cols_A; HYPRE_BigInt n_rows_B, n_cols_B; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /* if globally C=A*B is square and locally C_diag should also be square */ if ( num_rows_diag_A == num_cols_diag_B && n_rows_A == n_cols_B ) { allsquare = 1; } /*----------------------------------------------------------------------- * Extract B_ext, i.e. 
portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B -1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } HYPRE_Int i_begin, i_end; 
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) temp = 
hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=ns; i < ne; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif hypre_ParMatmul_RowSizes( /*&C_diag_i, &C_offd_i, &B_marker,*/ &C_diag_i, &C_offd_i, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C ); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, HYPRE_MEMORY_SHARED); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, HYPRE_MEMORY_SHARED); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, HYPRE_MEMORY_SHARED); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, HYPRE_MEMORY_SHARED); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. 
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne, size, rest, ii;
      HYPRE_Int i1, i2, i3, jj2, jj3;
      HYPRE_Int jj_row_begin_diag, jj_count_diag;
      HYPRE_Int jj_row_begin_offd, jj_count_offd;
      HYPRE_Int num_threads;
      HYPRE_Complex a_entry; /*, a_b_product;*/

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      size = num_rows_diag_A/num_threads;
      rest = num_rows_diag_A - size*num_threads;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = C_diag_i[ns];
      jj_count_offd = C_offd_i[ns];

      if (num_cols_diag_B || num_cols_offd_C)
      {
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST);
      }
      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
      {
         B_marker[i1] = -1;
      }

      /*-----------------------------------------------------------------------
       * Loop over interior c-points.
       *-----------------------------------------------------------------------*/
      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          * Create diagonal entry, C_{i1,i1}
          *--------------------------------------------------------------------*/
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if (allsquare)
         {
            B_marker[i1] = jj_count_diag;
            C_diag_data[jj_count_diag] = zero;
            C_diag_j[jj_count_diag] = i1;
            jj_count_diag++;
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/
         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];
               a_entry = A_offd_data[jj2];

               /*-----------------------------------------------------------
                * Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/
               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{i1,i3} has not already
                   * been accounted for. If it has not, create a new entry.
                   * If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3];
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3];
                     C_diag_j[jj_count_diag] = i3;
                     jj_count_diag++;
                  }
                  else
                  {
                     C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3];
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            a_entry = A_diag_data[jj2];

            /*-----------------------------------------------------------
             * Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/
            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                * Check B_marker to see that C_{i1,i3} has not already
                * been accounted for. If it has not, create a new entry.
                * If it has, add new contribution.
                *--------------------------------------------------------*/
               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3];
                  C_diag_j[jj_count_diag] = i3;
                  jj_count_diag++;
               }
               else
               {
                  C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3];
               }
            }
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{i1,i3} has not already
                   * been accounted for. If it has not, create a new entry.
                   * If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3];
                  }
               }
            }
         }
      }
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /* end parallel region */

   C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A,
                                col_starts_B, num_cols_offd_C,
                                C_diag_size, C_offd_size);

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
   hypre_ParCSRMatrixSetColStartsOwner(C, 0);

   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_data;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;

   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_ParCSRMatrixOffd(C) = C_offd;
   if (num_cols_offd_C)
   {
      hypre_CSRMatrixData(C_offd) = C_offd_data;
      hypre_CSRMatrixJ(C_offd) = C_offd_j;
      hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
   }

   /*-----------------------------------------------------------------------
    * Free various arrays
    *-----------------------------------------------------------------------*/
   hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
   if (B_ext_diag_size)
   {
      hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
   if (B_ext_offd_size)
   {
      hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif

   return C;
}

/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt,
   but the code was removed so it can be used for a corresponding function
   for Boolean matrices.

   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing B, they should be destroyed (including
   send_data contained in the comm_handle). */
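/*
 * Editor's usage sketch (illustrative only; do_local_work is a hypothetical
 * placeholder): the *_Overlap variants hand the two communication handles
 * back to the caller so communication can be overlapped with computation:
 *
 *    hypre_ParCSRCommHandle *h_idx, *h_data;
 *    hypre_CSRMatrix *B_ext =
 *       hypre_ParCSRMatrixExtractBExt_Overlap(B, A, 1, &h_idx, &h_data,
 *                                             NULL, NULL, 0, 0);
 *    do_local_work(A, B);                   // anything not touching B_ext
 *    hypre_ParCSRCommHandleDestroy(h_idx);  // completes the exchanges
 *    hypre_ParCSRCommHandleDestroy(h_data);
 *    // only now are B_ext's column indices and values safe to read; the
 *    // handles' send buffers must also be freed by the caller, as the
 *    // non-overlapping wrappers below demonstrate
 */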
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine,      /* 1 if only coarse points are needed */
   HYPRE_Int skip_same_sign  /* 1 if only points that have the same sign are needed */
   // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix,
   //                                          skip_fine = 1, skip_same_sign = 1 for A matrix
   // other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
   hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   HYPRE_Int *B_int_i;
   HYPRE_BigInt *B_int_j;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_Complex *B_int_data;
   HYPRE_BigInt *B_int_row_map;
   HYPRE_BigInt *B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i, j, k;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt first_row_index = row_starts[0];
#else
   HYPRE_BigInt first_row_index = row_starts[my_id];
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
#endif

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if (num_rows_B_ext < 0)
   {
      /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if (data) { *pB_ext_data = NULL; }
      if (find_row_map) { *pB_ext_row_map = NULL; }
      *num_nonzeros = 0;
      return;
   }
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if (find_row_map)
   {
      B_int_row_map = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc(HYPRE_BigInt, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   }

   /*--------------------------------------------------------------------------
    * Generate B_int_i by adding the number of row elements of offd and diag
    * for the corresponding rows; B_int_i[j+1] contains the number of elements
    * in row j (as determined through send_map_elmts).
    *--------------------------------------------------------------------------*/
   jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts[0] = B_int_i[0] = 0;

   /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
   {
      /*HYPRE_Int counts[num_sends];*/
      HYPRE_Int *counts;
      counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = 0;
         if (skip_fine && skip_same_sign)
         {
#ifndef HYPRE_NO_GLOBAL_PARTITION
            HYPRE_Int send_proc = send_procs[i];
            HYPRE_BigInt send_proc_first_row = row_starts[send_proc];
            HYPRE_BigInt send_proc_last_row = row_starts[send_proc + 1];
#endif
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { len++; }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
#ifdef HYPRE_NO_GLOBAL_PARTITION
                     if (offd_data[k] < 0) { len++; }
#else
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] < 0 &&
                         (CF_marker_offd[c] >= 0 ||
                          (c_global >= send_proc_first_row && c_global < send_proc_last_row))) { len++; }
#endif
                  }
               }
               else
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { len++; }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
#ifdef HYPRE_NO_GLOBAL_PARTITION
                     if (offd_data[k] > 0) { len++; }
#else
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] > 0 &&
                         (CF_marker_offd[c] >= 0 ||
                          (c_global >= send_proc_first_row && c_global < send_proc_last_row))) { len++; }
#endif
                  }
               }
               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else if (skip_fine)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;
               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0) { len++; }
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0) { len++; }
               }
               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
               len += offd_i[jrow + 1] - offd_i[jrow];
               B_int_i[j + 1] = len;
               count += len;
            }
         }

         if (find_row_map)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
            }
         }

         counts[i] = count;
      }

      hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
      {
         for (i = 1; i < num_sends; i++)
         {
            jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
         }

         /*--------------------------------------------------------------------------
          * initialize communication
          *--------------------------------------------------------------------------*/
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, &B_int_i[1], &(B_ext_i[1]));
         if (find_row_map)
         {
            /* scatter/gather B_int row numbers to form array of B_ext row numbers */
            row_map_comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, B_int_row_map, B_ext_row_map);
         }

         B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         if (data)
         {
            B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         }
      }
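      /*
       * Editor's note (descriptive): what the type-11 exchange above moves are
       * row *lengths*, not row contents -- B_int_i[j+1] holds the length of
       * send row j and lands in B_ext_i[1..].  The later prefix sum turns
       * B_ext_i into a proper CSR row-pointer array, e.g. (made-up numbers):
       *
       *    received lengths:  B_ext_i = { 0, 2, 0, 3 }
       *    after prefix sum:  B_ext_i = { 0, 2, 2, 5 }   (5 = *num_nonzeros)
       */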
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

         if (data)
         {
            if (skip_same_sign && skip_fine)
            {
#ifndef HYPRE_NO_GLOBAL_PARTITION
               HYPRE_Int send_proc = send_procs[i];
               HYPRE_BigInt send_proc_first_row = row_starts[send_proc];
               HYPRE_BigInt send_proc_last_row = row_starts[send_proc + 1];
#endif
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  /*HYPRE_Int count_begin = count;*/

                  if (diag_data[diag_i[jrow]] >= 0)
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
                        if (offd_data[k] < 0)
#else
                        if (offd_data[k] < 0 &&
                            (CF_marker_offd[c] >= 0 ||
                             (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
                  else
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
                        if (offd_data[k] > 0)
#else
                        if (offd_data[k] > 0 &&
                            (CF_marker_offd[c] >= 0 ||
                             (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     B_int_data[count] = diag_data[k];
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     B_int_data[count] = offd_data[k];
                     count++;
                  }
               }
            }
         } // data
         else
         {
            if (skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
                  {
                     if (CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (CF_marker_offd[offd_j[k]] >= 0)
                     {
                        B_int_j[count] = col_map_offd[offd_j[k]];
                        count++;
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         } // !data
      } /* for each send target */

      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   } /* omp parallel; JSP: this takes most of the time in this function */
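/*
 * Editor's note on the hypre_ParCSRCommHandleCreate job codes used in this
 * file (stated from the convention in par_csr_communication.c; treat it as
 * an assumption rather than normative documentation): jobs 1/2 exchange
 * HYPRE_Complex data, 11/12 HYPRE_Int data, and 21/22 HYPRE_BigInt data;
 * the first code of each pair communicates from the send side to the recv
 * side of the package, the second in the reverse direction.
 */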
   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * after communication exchange B_ext_i[j+1] contains the number of elements
    * of a row j !
    * evaluate B_ext_i and compute *num_nonzeros for B_ext
    *--------------------------------------------------------------------------*/
   for (i = 0; i < num_recvs; i++)
   {
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      {
         B_ext_i[j+1] += B_ext_i[j];
      }
   }

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   }

   /* note: *num_nonzeros must remain the total count computed above */
   for (i = 0; i < num_recvs; i++)
   {
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, B_int_j, B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1, tmp_comm_pkg, B_int_data, B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if (find_row_map)
   {
      hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);
   }

   /* end generic part */
}

void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data )
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data, &comm_handle_idx, &comm_handle_data,
      NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as a CSRMatrix.
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
                                       hypre_ParCSRMatrix *A,
                                       HYPRE_Int data,
                                       hypre_ParCSRCommHandle **comm_handle_idx,
                                       hypre_ParCSRCommHandle **comm_handle_data,
                                       HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
                                       HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_recvs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int num_sends;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);

   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int num_cols_B, num_nonzeros;
   HYPRE_Int num_rows_B_ext;
   hypre_CSRMatrix *B_ext;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_BigInt *idummy;

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }
   comm_pkg = hypre_ParCSRMatrixCommPkg(A);

   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   num_rows_B_ext = recv_vec_starts[num_recvs];

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
      &num_nonzeros,
      data, 0, comm, comm_pkg,
      num_cols_B, num_recvs, num_sends,
      first_col_diag, B->row_starts,
      recv_vec_starts, send_map_starts, send_map_elmts,
      diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      comm_handle_idx, comm_handle_data,
      CF_marker, CF_marker_offd,
      skip_fine, skip_same_sign);

   B_ext = hypre_CSRMatrixCreate(num_rows_B_ext, num_cols_B, num_nonzeros);
   hypre_CSRMatrixI(B_ext) = B_ext_i;
   hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
   if (data)
   {
      hypre_CSRMatrixData(B_ext) = B_ext_data;
   }

   return B_ext;
}

hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int data )
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(
      B, A, data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }

   return B_ext;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixTranspose
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix **AT_ptr,
                             HYPRE_Int data )
{
   hypre_ParCSRCommHandle *comm_handle;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
   HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);

   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
   HYPRE_Int i, j, k, index, counter, j_row;
   HYPRE_BigInt value;

   hypre_ParCSRMatrix *AT;
   hypre_CSRMatrix *AT_diag;
   hypre_CSRMatrix *AT_offd;
   hypre_CSRMatrix *AT_tmp;

   HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
   HYPRE_Int local_num_rows_AT, local_num_cols_AT;

   HYPRE_Int *AT_tmp_i;
   HYPRE_Int *AT_tmp_j;
   HYPRE_BigInt *AT_big_j = NULL;
   HYPRE_Complex *AT_tmp_data;

   HYPRE_Int *AT_buf_i;
   HYPRE_BigInt *AT_buf_j;
   HYPRE_Complex *AT_buf_data;

   HYPRE_Int *AT_offd_i;
   HYPRE_Int *AT_offd_j;
   HYPRE_Complex *AT_offd_data;
   HYPRE_BigInt *col_map_offd_AT;
   HYPRE_BigInt *row_starts_AT;
   HYPRE_BigInt *col_starts_AT;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *tmp_recv_vec_starts;
   HYPRE_Int *tmp_send_map_starts;
   hypre_ParCSRCommPkg *tmp_comm_pkg;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   num_cols_offd_AT = 0;
   counter = 0;
   AT_offd_j = NULL;
   AT_offd_data = NULL;
   col_map_offd_AT = NULL;

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      hypre_CSRMatrixTranspose(A_offd, &AT_tmp, data);

      AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
      AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
      if (data)
      {
         AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

      AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);

      if (AT_tmp_i[num_cols_offd])
      {
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
      }

      for (i = 0; i < AT_tmp_i[num_cols_offd]; i++)
      {
         //AT_tmp_j[i] += first_row_index;
         AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index;
      }

      for (i = 0; i < num_cols_offd; i++)
      {
         AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
   }

   hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);

   AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, HYPRE_MEMORY_SHARED);
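   /*
    * Editor's summary of the distributed transpose (descriptive): the diag
    * block is transposed purely locally (above).  The offd block is also
    * transposed locally into AT_tmp, but its rows belong to *other* ranks,
    * so the code below ships its row lengths (job 12), global column
    * indices (job 22) and, if requested, values (job 2) backwards along A's
    * communication pattern and merges them into AT's offd part.
    */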
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
      tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

      tmp_send_map_starts[0] = send_map_starts[0];
      for (i = 0; i < num_sends; i++)
      {
         tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            tmp_send_map_starts[i+1] += AT_buf_i[j];
            AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
         }
      }
      for (i = 0; i < num_cols; i++)
      {
         AT_offd_i[i+1] += AT_offd_i[i];
      }

      tmp_recv_vec_starts[0] = recv_vec_starts[0];
      for (i = 0; i < num_recvs; i++)
      {
         tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
         for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         {
            tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
         }
      }

      tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
      hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
      hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
      hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
      hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
      hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
      hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;

      AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
      AT_big_j = NULL;

      if (data)
      {
         AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         comm_handle = hypre_ParCSRCommHandleCreate(2, tmp_comm_pkg, AT_tmp_data, AT_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = NULL;
      }

      hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixDestroy(AT_tmp);

      if (AT_offd_i[num_cols])
      {
         AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], HYPRE_MEMORY_SHARED);
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
         if (data)
         {
            AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], HYPRE_MEMORY_SHARED);
         }
      }
      else
      {
         AT_offd_j = NULL;
         AT_offd_data = NULL;
      }

      counter = 0;
      for (i = 0; i < num_sends; i++)
      {
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            j_row = send_map_elmts[j];
            index = AT_offd_i[j_row];
            for (k = 0; k < AT_buf_i[j]; k++)
            {
               if (data)
               {
                  AT_offd_data[index] = AT_buf_data[counter];
               }
               AT_big_j[index++] = AT_buf_j[counter++];
            }
            AT_offd_i[j_row] = index;
         }
      }
      for (i = num_cols; i > 0; i--)
      {
         AT_offd_i[i] = AT_offd_i[i-1];
      }
      AT_offd_i[0] = 0;

      if (counter)
      {
         hypre_BigQsort0(AT_buf_j, 0, counter-1);
         num_cols_offd_AT = 1;
         value = AT_buf_j[0];
         for (i = 1; i < counter; i++)
         {
            if (value < AT_buf_j[i])
            {
               AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
               value = AT_buf_j[i];
            }
         }
      }

      if (num_cols_offd_AT)
      {
         col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
      }
      else
      {
         col_map_offd_AT = NULL;
      }

      for (i = 0; i < num_cols_offd_AT; i++)
      {
         col_map_offd_AT[i] = AT_buf_j[i];
      }
      hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
      hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
      if (data)
      {
         hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
      }

      for (i = 0; i < counter; i++)
      {
         AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT, AT_big_j[i], num_cols_offd_AT);
      }
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
   }

   AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
   hypre_CSRMatrixI(AT_offd) = AT_offd_i;
   hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
   hypre_CSRMatrixData(AT_offd) = AT_offd_data;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   for (i = 0; i < 2; i++)
   {
      row_starts_AT[i] = col_starts[i];
   }
   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      for (i = 0; i < 2; i++)
      {
         col_starts_AT[i] = row_starts[i];
      }
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }
   first_row_index_AT = row_starts_AT[0];
   first_col_diag_AT = col_starts_AT[0];
   local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT);
   local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT);
#else
   row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_procs+1; i++)
   {
      row_starts_AT[i] = col_starts[i];
   }
   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_procs+1; i++)
      {
         col_starts_AT[i] = row_starts[i];
      }
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }
   first_row_index_AT = row_starts_AT[my_id];
   first_col_diag_AT = col_starts_AT[my_id];
   local_num_rows_AT = (HYPRE_Int)(row_starts_AT[my_id+1]-first_row_index_AT);
   local_num_cols_AT = (HYPRE_Int)(col_starts_AT[my_id+1]-first_col_diag_AT);
#endif

   AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(AT) = comm;
   hypre_ParCSRMatrixDiag(AT) = AT_diag;
   hypre_ParCSRMatrixOffd(AT) = AT_offd;
   hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
   hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
   hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
   hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;

   hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
   hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;

   hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
   hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;

   hypre_ParCSRMatrixOwnsData(AT) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
   hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
   if (row_starts_AT == col_starts_AT)
   {
      hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
   }

   hypre_ParCSRMatrixCommPkg(AT) = NULL;
   hypre_ParCSRMatrixCommPkgT(AT) = NULL;

   hypre_ParCSRMatrixRowindices(AT) = NULL;
   hypre_ParCSRMatrixRowvalues(AT) = NULL;
   hypre_ParCSRMatrixGetrowactive(AT) = 0;

   *AT_ptr = AT;

   return ierr;
}
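/*
 * Editor's usage sketch for hypre_ParCSRMatrixTranspose (illustrative):
 *
 *    hypre_ParCSRMatrix *AT;
 *    hypre_ParCSRMatrixTranspose(A, &AT, 1);   // data = 1: transpose values too
 *    ...                                       // use AT, e.g. as R = P^T
 *    hypre_ParCSRMatrixDestroy(AT);            // AT owns its arrays
 *
 * With data == 0 only the sparsity pattern of A^T is constructed.
 */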
/* -----------------------------------------------------------------------------
 * generate a parallel spanning tree (for Maxwell Equation)
 * G_csr is the node to edge connectivity matrix
 * ----------------------------------------------------------------------------- */

void
hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
                                   HYPRE_Int **indices,
                                   HYPRE_Int G_type )
{
   HYPRE_BigInt nrows_G, ncols_G;
   HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
   HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
   HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
   HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
   HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
   HYPRE_Int *T_diag_j, *counts, offset;
   MPI_Comm comm;
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_CSRMatrix *G_diag;

   /* fetch G matrix (G_type = 0 ==> node to edge) */

   if (G_type == 0)
   {
      nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
      ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
      G_diag = hypre_ParCSRMatrixDiag(G_csr);
      G_diag_i = hypre_CSRMatrixI(G_diag);
      G_diag_j = hypre_CSRMatrixJ(G_diag);
   }
   else
   {
      nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
      ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
      G_diag = hypre_ParCSRMatrixDiag(G_csr);
      T_diag_i = hypre_CSRMatrixI(G_diag);
      T_diag_j = hypre_CSRMatrixJ(G_diag);

      counts = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
      for (i = 0; i < nrows_G; i++) { counts[i] = 0; }
      for (i = 0; i < T_diag_i[ncols_G]; i++) { counts[T_diag_j[i]]++; }
      G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1), HYPRE_MEMORY_HOST);
      G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G], HYPRE_MEMORY_HOST);
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++)
      {
         G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      }
      for (i = 0; i < ncols_G; i++)
      {
         for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++)
         {
            k = T_diag_j[j];
            offset = G_diag_i[k]++;
            G_diag_j[offset] = i;
         }
      }
      /* the increments above advanced G_diag_i; rebuild the row pointers */
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++)
      {
         G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      }
      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   }

   /* form G transpose in special form (2 nodes per edge max) */
   GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G, HYPRE_MEMORY_HOST);
   for (i = 0; i < 2 * ncols_G; i++) { GT_diag_mat[i] = -1; }
   for (i = 0; i < nrows_G; i++)
   {
      for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++)
      {
         edge = G_diag_j[j];
         if (GT_diag_mat[edge*2] == -1) { GT_diag_mat[edge*2] = i; }
         else { GT_diag_mat[edge*2+1] = i; }
      }
   }

   /* traverse the local matrix graph to find a spanning tree */
   nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
   edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G, HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_G; i++) { nodes_marked[i] = 0; }
   for (i = 0; i < ncols_G; i++) { edges_marked[i] = 0; }
   queue = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
   queue_head = 0;
   queue_tail = 1;
   queue[0] = 0;
   nodes_marked[0] = 1;
   while ((queue_tail-queue_head) > 0)
   {
      node = queue[queue_tail-1];
      queue_tail--;
      for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++)
      {
         edge = G_diag_j[i];
         if (edges_marked[edge] == 0)
         {
            if (GT_diag_mat[2*edge+1] != -1)
            {
               node2 = GT_diag_mat[2*edge];
               if (node2 == node) { node2 = GT_diag_mat[2*edge+1]; }
               if (nodes_marked[node2] == 0)
               {
                  nodes_marked[node2] = 1;
                  edges_marked[edge] = 1;
                  queue[queue_tail] = node2;
                  queue_tail++;
               }
            }
         }
      }
   }
   hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
   hypre_TFree(queue, HYPRE_MEMORY_HOST);
   hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST);

   /* fetch the communication information from G_csr */
   comm = hypre_ParCSRMatrixComm(G_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   if (comm_pkg == NULL)
   {
      hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
      comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   }

   /* construct processor graph based on node-edge connection */
   /* (local edges connected to neighbor processor nodes)     */
   n_children = 0;
   nrecvs = nsends = 0;
   if (nprocs > 1)
   {
      nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      proc_array = NULL;
      n_proc_array = 0;
      if ((nsends+nrecvs) > 0)
      {
         proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs), HYPRE_MEMORY_HOST);
         for (i = 0; i < nsends; i++) { proc_array[i] = send_procs[i]; }
         for (i = 0; i < nrecvs; i++) { proc_array[nsends+i] = recv_procs[i]; }
         hypre_qsort0(proc_array, 0, nsends+nrecvs-1);
         n_proc_array = 1;
         for (i = 1; i < nrecvs+nsends; i++)
         {
            if (proc_array[i] != proc_array[n_proc_array-1])
            {
               proc_array[n_proc_array++] = proc_array[i];
            }
         }
      }
      pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1), HYPRE_MEMORY_HOST);
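      /*
       * Editor's note (descriptive): every rank contributes its deduplicated
       * neighbor list; Allgather collects the list lengths and Allgatherv the
       * lists themselves, so all ranks obtain the complete processor graph in
       * CSR form (pgraph_i, pgraph_j) and can run the same traversal from
       * rank 0, agreeing on one global parent/child tree without any extra
       * messages.
       */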
      recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
      hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1,
                          HYPRE_MPI_INT, comm);
      pgraph_i[0] = 0;
      for (i = 1; i <= nprocs; i++)
      {
         pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1];
      }
      pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs], HYPRE_MEMORY_HOST);
      hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j,
                           recv_cnts, pgraph_i, HYPRE_MPI_INT, comm);
      hypre_TFree(proc_array, HYPRE_MEMORY_HOST);
      hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST);

      /* traverse the processor graph to determine parent and children */
      nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
      for (i = 0; i < nprocs; i++) { nodes_marked[i] = -1; }
      queue = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
      queue_head = 0;
      queue_tail = 1;
      node = 0;
      queue[0] = node;
      while ((queue_tail-queue_head) > 0)
      {
         proc = queue[queue_tail-1];
         queue_tail--;
         for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++)
         {
            proc2 = pgraph_j[i];
            if (nodes_marked[proc2] < 0)
            {
               nodes_marked[proc2] = proc;
               queue[queue_tail] = proc2;
               queue_tail++;
            }
         }
      }
      parent = nodes_marked[mypid];
      n_children = 0;
      for (i = 0; i < nprocs; i++)
      {
         if (nodes_marked[i] == mypid) { n_children++; }
      }
      if (n_children == 0)
      {
         children = NULL;
      }
      else
      {
         children = hypre_TAlloc(HYPRE_Int, n_children, HYPRE_MEMORY_HOST);
         n_children = 0;
         for (i = 0; i < nprocs; i++)
         {
            if (nodes_marked[i] == mypid) { children[n_children++] = i; }
         }
      }
      hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
      hypre_TFree(queue, HYPRE_MEMORY_HOST);
      hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST);
      hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST);
   }

   /* first, connection with my parent : if the edge in my parent     *
    * is incident to one of my nodes, then my parent will mark it     */
   found = 0;
   for (i = 0; i < nrecvs; i++)
   {
      proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
      if (proc == parent)
      {
         found = 1;
         break;
      }
   }

   /* but if all the edges connected to my parent are on my side,     *
    * then I will just pick one of them as tree edge                  */
   if (found == 0)
   {
      for (i = 0; i < nsends; i++)
      {
         proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
         if (proc == parent)
         {
            k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k);
            edges_marked[edge] = 1;
            break;
         }
      }
   }

   /* next, if my processor has an edge incident on one node in my    *
    * child, put this edge on the tree; but if there is no such       *
    * edge, then I will assume my child will pick up an edge          */
   for (j = 0; j < n_children; j++)
   {
      proc = children[j];
      for (i = 0; i < nsends; i++)
      {
         proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
         if (proc == proc2)
         {
            k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k);
            edges_marked[edge] = 1;
            break;
         }
      }
   }
   if (n_children > 0)
   {
      hypre_TFree(children, HYPRE_MEMORY_HOST);
   }

   /* count the size of the tree */
   tree_size = 0;
   for (i = 0; i < ncols_G; i++)
   {
      if (edges_marked[i] == 1) { tree_size++; }
   }
   t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1), HYPRE_MEMORY_HOST);
   t_indices[0] = tree_size;
   tree_size = 1;
   for (i = 0; i < ncols_G; i++)
   {
      if (edges_marked[i] == 1) { t_indices[tree_size++] = i; }
   }
   (*indices) = t_indices;
   hypre_TFree(edges_marked, HYPRE_MEMORY_HOST);
   if (G_type != 0)
   {
      hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST);
      hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST);
   }
}
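/*
 * Editor's note on the index encoding used by the extraction routines below
 * (descriptive, with a made-up example): exp_indices[] folds both
 * renumberings into a single array -- row i selected as the k-th index gets
 * exp_indices[i] = k >= 0, while the j-th *unselected* row gets the
 * complement code exp_indices[i] = -(j+1) < 0.  For nrows_A = 5 and
 * indices = {1, 3}:
 *
 *    row         :  0   1   2   3   4
 *    exp_indices : -1   0  -2   1  -3
 *
 * so "exp_indices[col] >= 0" tests membership in the selected block and
 * "-exp_indices[col] - 1" recovers the position in the complement.
 */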
/* -----------------------------------------------------------------------------
 * extract submatrices based on given indices
 * ----------------------------------------------------------------------------- */

void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr,
                                           HYPRE_Int *indices2,
                                           hypre_ParCSRMatrix ***submatrices )
{
   HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
   HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
   HYPRE_BigInt *itmp_array;
   HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag;
   HYPRE_Int nrows, nnz;
   HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts;
   HYPRE_Int *diag_i, *diag_j, row, *offd_i;
   HYPRE_Complex *A_diag_a, *diag_a;
   hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr;
   hypre_CSRMatrix *A_diag, *diag, *offd;
   MPI_Comm comm;

   /* -----------------------------------------------------
    * first make sure the incoming indices are in order
    * ----------------------------------------------------- */
   nindices = indices2[0];
   indices = &(indices2[1]);
   hypre_qsort0(indices, 0, nindices-1);

   /* -----------------------------------------------------
    * fetch matrix information
    * ----------------------------------------------------- */
   nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr);
   A_diag = hypre_ParCSRMatrixDiag(A_csr);
   A_diag_i = hypre_CSRMatrixI(A_diag);
   A_diag_j = hypre_CSRMatrixJ(A_diag);
   A_diag_a = hypre_CSRMatrixData(A_diag);
   comm = hypre_ParCSRMatrixComm(A_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   if (nprocs > 1)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: cannot handle nprocs > 1 yet.\n");
      exit(1);
   }

   /* -----------------------------------------------------
    * compute new matrix dimensions
    * ----------------------------------------------------- */
   proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1), HYPRE_MEMORY_HOST);
   proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1), HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
                       HYPRE_MPI_INT, comm);
   k = 0;
   for (i = 0; i < nprocs; i++)
   {
      j = proc_offsets1[i];
      proc_offsets1[i] = k;
      k += j;
   }
   proc_offsets1[nprocs] = k;
   itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
   for (i = 0; i <= nprocs; i++)
   {
      proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]);
   }

   /* -----------------------------------------------------
    * assign id's to row and col for later processing
    * ----------------------------------------------------- */
   exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_A; i++) { exp_indices[i] = -1; }
   for (i = 0; i < nindices; i++)
   {
      if (exp_indices[indices[i]] == -1)
      {
         exp_indices[indices[i]] = i;
      }
      else
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: wrong index\n");
         exit(1);
      }
   }
   k = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         exp_indices[i] = -k - 1;
         k++;
      }
   }

   /* -----------------------------------------------------
    * compute number of nonzeros for each block
    * ----------------------------------------------------- */
   nnz11 = nnz12 = nnz21 = nnz22 = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) { nnz11++; }
            else { nnz12++; }
         }
      }
      else
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) { nnz21++; }
            else { nnz22++; }
         }
      }
   }

   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz11;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* This case is not yet implemented! */
   global_nrows = 0;
   global_ncols = 0;
   row_starts = NULL;
   col_starts = NULL;
#else
   global_nrows = proc_offsets1[nprocs];
   global_ncols = proc_offsets1[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets1[i];
      col_starts[i] = proc_offsets1[i];
   }
#endif
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A12 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz12;
   global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
   }
   A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0)
            {
               diag_j[nnz] = -exp_indices[col] - 1;
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   if (nnz > nnz_diag)
   {
      hypre_error(HYPRE_ERROR_GENERIC);
      /*hypre_printf("WARNING WARNING WARNING\n");*/
   }
   diag = hypre_ParCSRMatrixDiag(A12_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
   offd = hypre_ParCSRMatrixOffd(A12_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A21 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz21;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A22 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz22;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
   }
   A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0)
            {
               diag_j[nnz] = -exp_indices[col] - 1;
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A22_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
   offd = hypre_ParCSRMatrixOffd(A22_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A12_csr;
   (*submatrices)[2] = A21_csr;
   (*submatrices)[3] = A22_csr;
   hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
   hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
   hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}

/* -----------------------------------------------------------------------------
 * extract submatrices of a rectangular matrix
 * ----------------------------------------------------------------------------- */

void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr,
                                              HYPRE_Int *indices2,
                                              hypre_ParCSRMatrix ***submatrices )
{
   HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
   HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
   HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag;
   HYPRE_Int *A_offd_i, *A_offd_j;
   HYPRE_Int nrows, nnz;
   HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array;
   HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd;
   HYPRE_Complex *A_diag_a, *A_offd_a, *diag_a, *offd_a;
   hypre_ParCSRMatrix *A11_csr, *A21_csr;
   hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd;
   MPI_Comm comm;

   /* -----------------------------------------------------
    * first make sure the incoming indices are in order
    * ----------------------------------------------------- */
   nindices = indices2[0];
   indices = &(indices2[1]);
   hypre_qsort0(indices, 0, nindices-1);

   /* -----------------------------------------------------
    * fetch matrix information
    * ----------------------------------------------------- */
   nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr);
   A_diag = hypre_ParCSRMatrixDiag(A_csr);
   A_diag_i = hypre_CSRMatrixI(A_diag);
   A_diag_j = hypre_CSRMatrixJ(A_diag);
   A_diag_a = hypre_CSRMatrixData(A_diag);
   A_offd = hypre_ParCSRMatrixOffd(A_csr);
   A_offd_i = hypre_CSRMatrixI(A_offd);
   A_offd_j = hypre_CSRMatrixJ(A_offd);
   A_offd_a = hypre_CSRMatrixData(A_offd);
   comm = hypre_ParCSRMatrixComm(A_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);

   /* -----------------------------------------------------
    * compute new matrix dimensions
    * ----------------------------------------------------- */
   proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1), HYPRE_MEMORY_HOST);
   proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1), HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
                       HYPRE_MPI_INT, comm);
   k = 0;
   for (i = 0; i < nprocs; i++)
   {
      j = proc_offsets1[i];
      proc_offsets1[i] = k;
      k += j;
   }
   proc_offsets1[nprocs] = k;
   itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
   for (i = 0; i <= nprocs; i++)
   {
      proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]);
   }

   /* -----------------------------------------------------
    * assign id's to row and col for later processing
    * ----------------------------------------------------- */
   exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_A; i++) { exp_indices[i] = -1; }
   for (i = 0; i < nindices; i++)
   {
      if (exp_indices[indices[i]] == -1)
      {
         exp_indices[indices[i]] = i;
      }
      else
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractRowSubmatrices: wrong index\n");
         exit(1);
      }
   }
   k = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         exp_indices[i] = -k - 1;
         k++;
      }
   }
   /* -----------------------------------------------------
    * compute number of nonzeros for each block
    * ----------------------------------------------------- */
   nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) { nnz11++; }
         }
         nnz11_offd += A_offd_i[i+1] - A_offd_i[i];
      }
      else
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0) { nnz21++; }
         }
         nnz21_offd += A_offd_i[i+1] - A_offd_i[i];
      }
   }

   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A_csr));
   nnz_diag = nnz11;
   nnz_offd = nnz11_offd;

   global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
   itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
      col_starts[i] = itmp_array[i];
   }
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            offd_a[nnz++] = A_offd_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * create A21 matrix
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A_csr));
   nnz_offd = nnz21_offd;
   nnz_diag = nnz21;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = itmp_array[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
                         HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            diag_j[nnz] = A_diag_j[j];
            diag_a[nnz++] = A_diag_a[j];
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            /* copy the values from the offd data array; the original code
               read A_diag_a[j] here, which indexes the wrong array with an
               offd position */
            offd_a[nnz++] = hypre_CSRMatrixData(A_offd)[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */

   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A21_csr;
   free(proc_offsets1);
   free(proc_offsets2);
   free(exp_indices);
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * ----------------------------------------------------------------------------- */

HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
   hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );

   return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixAminvDB
 * computes C = (A - inv(D)B) where D is a diagonal matrix
 * Note: Data structure of A is expected to be a subset of data structure of B!
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixCompleteClone(B); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows/num_threads; rest = num_rows - size*num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j) #endif for (ii=0; ii < num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) A_marker[i] = -1; for (i=ns; i < ne; i++) D_tmp[i] = 1.0/d[i]; num_cols = C_diag_i[ns]; for (i=ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j]; } } } for (i=0; i < num_cols_offd_B; i++) A_marker[i] = -1; num_cols = C_offd_i[ns]; for (i=ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j]; } } } hypre_TFree(A_marker, HYPRE_MEMORY_HOST); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_SHARED); for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i]; for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i]; for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i]; for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i]; for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i]; comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = 
send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt n_rows_A, n_cols_A; HYPRE_BigInt n_rows_B, n_cols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); 
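   /*
    * Overview (comment added for clarity): with A = [A_diag A_offd] and
    * B = [B_diag B_offd] split into on- and off-processor blocks, the local
    * rows of C = A^T * B are assembled from four sparse products,
    *
    *    C_tmp_diag = AT_diag * B_diag      C_tmp_offd = AT_diag * B_offd
    *    C_int_diag = AT_offd * B_diag      C_int_offd = AT_offd * B_offd
    *
    * The C_tmp_* terms are rows this process owns; the C_int_* terms belong
    * to other processes and are shipped to their owners as C_ext via
    * hypre_ExchangeRAPData, then folded into C_tmp_* below.
    */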
max_num_threads = hypre_NumThreads(); if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; C_ext = hypre_ExchangeRAPData(C_int, comm_pkg_A); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) temp[C_ext_offd_size++] = C_ext_j[j]; else C_ext_diag_size++; C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { hypre_BigQsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = 
hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } for (i=0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, HYPRE_MEMORY_SHARED); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, HYPRE_MEMORY_SHARED); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_SHARED); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_SHARED); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (ik = 0; ik < num_cols_diag_B; ik++) B_marker[ik] = -1; for (ik = 0; ik < num_cols_offd_C; ik++) B_marker_offd[ik] = -1; nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; 
nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) B_marker[ik] = -1; for (ik = 0; ik < num_cols_offd_C; ik++) B_marker_offd[ik] = -1; /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii-1]; nnz_o = C_offd_array[ii-1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } break; } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_SHARED); hypre_TFree(C_offd_array, HYPRE_MEMORY_SHARED); } /*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ #ifdef HYPRE_NO_GLOBAL_PARTITION /* row_starts[0] is start of local rows. 
row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag); #else first_row_index = col_starts_A[my_id]; local_num_rows = (HYPRE_Int)(col_starts_A[my_id+1]-first_row_index); first_col_diag = col_starts_B[my_id]; local_num_cols = (HYPRE_Int)(col_starts_B[my_id+1]-first_col_diag); #endif C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A; hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixRowStarts(C) = col_starts_A; hypre_ParCSRMatrixColStarts(C) = col_starts_B; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); if (C_diag) hypre_ParCSRMatrixDiag(C) = C_diag; else hypre_ParCSRMatrixDiag(C) = C_tmp_diag; if (C_offd) hypre_ParCSRMatrixOffd(C) = C_offd; else hypre_ParCSRMatrixOffd(C) = C_tmp_offd; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) P_marker[i] = -1; jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i=0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i=0; i < num_cols_offd_C; i++) if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } for (i=0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); if (C_diag) hypre_CSRMatrixDestroy(C_tmp_diag); if (C_offd) hypre_CSRMatrixDestroy(C_tmp_offd); return C; } HYPRE_Int hypre_ParvecBdiagInvScal( hypre_ParVector *b, HYPRE_Int blockSize, hypre_ParVector **bs, HYPRE_Complex 
                            *bdiaginv,
                            hypre_ParCSRCommPkg *comm_pkg )
{
   /* b is a ParVector, so use the vector accessor for its communicator
      (the original read hypre_ParCSRMatrixComm(b)) */
   MPI_Comm comm = hypre_ParVectorComm(b);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, s, block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row   = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row    = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row     = last_row + 1; /* one past-the-last */

   /* range of block-diagonal rows touched by this proc's rows */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) *
                                             (HYPRE_BigInt)blockSize, nrow_global );

   HYPRE_Complex *dense = bdiaginv;

   /* local vector of b */
   hypre_Vector  *b_local      = hypre_ParVectorLocalVector(b);
   HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle *comm_handle;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   j = 2;
#else
   j = num_procs + 1;
#endif
   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(part, hypre_ParVectorPartitioning(b), j*sizeof(HYPRE_BigInt));
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b),
                                                  hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector  *bnew_local      = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);

   /* ...
*/ hypre_ParCSRCommHandleDestroy(comm_handle); for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); for (big_i = block_start; big_i < block_end; big_i++) { if (big_i < first_row || big_i >= end_row) { continue; } HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); bnew_local_data[local_i] = 0.0; for (j = 0; j < s; j++) { HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); bnew_local_data[local_i] += val * b_local_data[rid]; } else { HYPRE_Int rid; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } bnew_local_data[local_i] += val * recv_b[rid]; } } } dense += blockSize * blockSize; } hypre_TFree(send_b, HYPRE_MEMORY_HOST); hypre_TFree(recv_b, HYPRE_MEMORY_HOST); *bs = bnew; return hypre_error_flag; } /** * @brief Compute As = B^{-1}*A, where B is the block diagonal of A * @param[in] A : * @param[in] blockSize: block size * @param[out] B : * @return * @warning */ HYPRE_Int hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A, HYPRE_Int blockSize, hypre_ParCSRMatrix **As, HYPRE_Complex **bdiaginv, hypre_ParCSRCommPkg **commpkg_out ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, k, s; HYPRE_BigInt block_start, block_end; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A); HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */ HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */ HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local; HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); /* if square globally and locally */ HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col); if (nrow_global != ncol_global) { hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n"); return hypre_error_flag; } /* in block diagonals, row range of the blocks this proc span */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); HYPRE_Int 
num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize); //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); //return 0; /* number of external rows */ HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row)); HYPRE_BigInt *ext_indices; HYPRE_Int A_ext_nnz; hypre_CSRMatrix *A_ext = NULL; HYPRE_Complex *A_ext_a = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST); HYPRE_Real *dense = dense_all; HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST); HYPRE_Complex *dgetri_work = NULL; HYPRE_Int dgetri_lwork = -1, lapack_info; HYPRE_Int num_cols_A_offd_new; HYPRE_BigInt *col_map_offd_A_new; HYPRE_BigInt big_i; HYPRE_Int *offd2new = NULL; HYPRE_Int *marker_diag, *marker_newoffd; HYPRE_Int nnz_diag = A_diag_i[nrow_local]; HYPRE_Int nnz_offd = A_offd_i[nrow_local]; HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0; HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new; HYPRE_Complex *A_diag_a_new, *A_offd_a_new; /* heuristic */ HYPRE_Int nnz_diag_alloc = 2 * nnz_diag; HYPRE_Int nnz_offd_alloc = 2 * nnz_offd; A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *Anew; hypre_CSRMatrix *Anew_diag; hypre_CSRMatrix *Anew_offd; HYPRE_BigInt *row_starts_new, *col_starts_new; HYPRE_Real eps = 2.2e-16; /* Start with extracting the external rows */ HYPRE_BigInt *ext_offd; ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST); j = 0; for (big_i = first_row_block; big_i < first_row; big_i++) { ext_indices[j++] = big_i; } for (big_i = end_row; big_i < end_row_block; big_i++) { ext_indices[j++] = big_i; } hypre_assert(j == num_ext_rows); hypre_ParcsrGetExternalRows(A, num_ext_rows, ext_indices, &A_ext, commpkg_out); hypre_TFree(ext_indices, HYPRE_MEMORY_HOST); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_a = hypre_CSRMatrixData(A_ext); A_ext_nnz = A_ext_i[num_ext_rows]; ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST); /* fint the offd incides in A_ext */ for (i = 0, j = 0; i < A_ext_nnz; i++) { /* global index */ HYPRE_BigInt cid = A_ext_j[i]; /* keep the offd indices */ if (cid < first_col || cid >= end_col) { ext_offd[j++] = cid; } } /* remove duplicates after sorting (TODO better ways?) 
*/ hypre_BigQsort0(ext_offd, 0, j-1); for (i = 0, k = 0; i < j; i++) { if (i == 0 || ext_offd[i] != ext_offd[i-1]) { ext_offd[k++] = ext_offd[i]; } } /* uniion these `k' new indices into col_map_offd_A */ col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST); if (k) { /* map offd to offd_new */ offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL); hypre_TFree(ext_offd, HYPRE_MEMORY_HOST); /* * adjust column indices in A_ext */ for (i = 0; i < A_ext_nnz; i++) { HYPRE_BigInt cid = A_ext_j[i]; if (cid < first_col || cid >= end_col) { j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new); /* searching must succeed */ hypre_assert(j >= 0 && j < num_cols_A_offd_new); /* trick: save ncol_local + j back */ A_ext_j[i] = ncol_local + j; } else { /* save local index: [0, ncol_local-1] */ A_ext_j[i] = cid - first_col; } } /* marker for diag */ marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } /* marker for newoffd */ marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } /* outer most loop for blocks */ for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); /* 1. fill the dense block diag matrix */ for (big_i = block_start; big_i < block_end; big_i++) { /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); /* row index i: it can be local or external */ if (big_i >= first_row && big_i < end_row) { /* is a local row */ j = (HYPRE_Int)(big_i - first_row); for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k]; } } if (num_cols_A_offd) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k]; } } } } else { /* is an external row */ if (big_i < first_row) { j = (HYPRE_Int)(big_i - first_row_block); } else { j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row); } for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++) { HYPRE_BigInt cid = A_ext_j[k]; /* recover the global index */ cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k]; } } } } /* 2. 
invert the dense matrix */ hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info); hypre_assert(lapack_info == 0); if (lapack_info == 0) { HYPRE_Int query = -1; HYPRE_Real lwork_opt; /* query the optimal size of work */ hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info); hypre_assert(lapack_info == 0); if (lwork_opt > dgetri_lwork) { dgetri_lwork = lwork_opt; dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST); } hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info); hypre_assert(lapack_info == 0); } /* filter out *zeros* */ HYPRE_Real Fnorm = 0.0; for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { HYPRE_Complex t = dense[j+i*blockSize]; Fnorm += t * t; } } Fnorm = sqrt(Fnorm); for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm ) { dense[j+i*blockSize] = 0.0; } } } /* 3. premultiplication: one-pass dynamic allocation */ for (big_i = block_start; big_i < block_end; big_i++) { /* starting points of this row in j */ HYPRE_Int diag_i_start = nnz_diag_new; HYPRE_Int offd_i_start = nnz_offd_new; /* compute a new row with global index 'i' and local index 'local_i' */ HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); if (big_i < first_row || big_i >= end_row) { continue; } /* if square^2: reserve the first space in diag part to the diag entry */ if (square2) { marker_diag[local_i] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = local_i; A_diag_a_new[nnz_diag_new] = 0.0; nnz_diag_new ++; } /* combine s rows */ for (j = 0; j < s; j++) { /* row to combine: global row id */ HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; /* the multipiler */ HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { /* this row is local */ HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); HYPRE_Int ii; for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++) { HYPRE_Int col = A_diag_j[ii]; HYPRE_Complex vv = A_diag_a[ii]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++) { HYPRE_Int col = A_offd_j[ii]; /* use the mapper to map to new offd */ HYPRE_Int col_new = offd2new ? 
offd2new[col] : col; HYPRE_Complex vv = A_offd_a[ii]; if (marker_newoffd[col_new] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col_new] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col_new; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col_new]; hypre_assert(A_offd_j_new[p] == col_new); A_offd_a_new[p] += val * vv; } } } else { /* this is an external row: go to A_ext */ HYPRE_Int rid, ii; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++) { HYPRE_Int col = (HYPRE_Int)A_ext_j[ii]; HYPRE_Complex vv = A_ext_a[ii]; if (col < ncol_local) { /* in diag part */ if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } else { /* in offd part */ col -= ncol_local; if (marker_newoffd[col] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col]; hypre_assert(A_offd_j_new[p] == col); A_offd_a_new[p] += val * vv; } } } } } /* done for row local_i */ A_diag_i_new[local_i + 1] = nnz_diag_new; A_offd_i_new[local_i + 1] = nnz_offd_new; } /* for i, each row */ dense += blockSize * blockSize; } /* for each block */ /* done with all rows */ /* resize properly */ A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST); /* readjust col_map_offd_new */ for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } for (i = 0; i < nnz_offd_new; i++) { j = A_offd_j_new[i]; if (marker_newoffd[j] == -1) { marker_newoffd[j] = 1; } } for (i = 0, j = 0; i < num_cols_A_offd_new; i++) { if (marker_newoffd[i] == 1) { col_map_offd_A_new[j] = col_map_offd_A_new[i]; marker_newoffd[i] = j++; } } num_cols_A_offd_new = j; for (i = 0; i < nnz_offd_new; i++) { j = marker_newoffd[A_offd_j_new[i]]; hypre_assert(j >= 0 && j < num_cols_A_offd_new); 
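/* store the remapped (compressed) offd column index computed above */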
A_offd_j_new[i] = j; } #ifdef HYPRE_NO_GLOBAL_PARTITION j = 2; #else j = num_procs + 1; #endif row_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); col_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); memcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt)); memcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt)); /* Now, we should have everything of Parcsr matrix As */ Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* free workspace */ if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRows( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_CSRMatrix **A_ext, hypre_ParCSRCommPkg **commpkg_out ) { HYPRE_Int i, j, k, i1, j1, start, end, num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a, *recv_a; hypre_ParCSRCommPkg *comm_pkg, *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); /* create CommPkg for external rows */ hypre_ParCSRFindExtendCommPkg(A, indices_len, indices, &comm_pkg); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* the arrays to send and recv: we first send and recv the row lengths */ send_i = hypre_CTAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); /* fill the send array with row lengths */ for (i = 0, num_nnz_send = 0; i < num_rows_send; i++) { /* j: row index to send */ j = 
hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j]; num_nnz_send += send_i[i]; } /* send this array out: note the shift in recv_i by one */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); /* ... */ hypre_ParCSRCommHandleDestroy(comm_handle); /* prepare data to send out */ send_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST); send_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST); /* pointers to each proc in send_j */ send_jstarts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); /* fill the CSR matrix: j and a */ for (i = 0, i1 = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); for (j = start; j < end; j++) { /* will send row j1 to send_proc[i] */ j1 = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); /* open row j1 and fill ja and a to send */ for (k = A_diag_i[j1]; k < A_diag_i[j1+1]; k++) { send_j[i1] = first_row + (HYPRE_BigInt)A_diag_j[k]; send_a[i1] = A_diag_a[k]; i1++; } if (num_procs > 1) { for (k = A_offd_i[j1]; k < A_offd_i[j1+1]; k++) { send_j[i1] = col_map_offd_A[A_offd_j[k]]; send_a[i1] = A_offd_a[k]; i1++; } } } send_jstarts[i+1] = i1; } hypre_assert(i1 == num_nnz_send); /* adjust recv_i to ptrs */ for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST); recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST); recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* do communication */ /* ja */ comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j); /* ... */ hypre_ParCSRCommHandleDestroy(comm_handle); /* a */ comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, send_a, recv_a); /* ... 
*/ hypre_ParCSRCommHandleDestroy(comm_handle); /* A_ext is ready */ *A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixI (*A_ext) = recv_i; hypre_CSRMatrixBigJ(*A_ext) = recv_j; hypre_CSRMatrixData(*A_ext) = recv_a; if (commpkg_out) { *commpkg_out = comm_pkg; } else { hypre_MatvecCommPkgDestroy(comm_pkg); } hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(send_a, HYPRE_MEMORY_HOST); hypre_TFree(send_jstarts, HYPRE_MEMORY_HOST); hypre_TFree(recv_jstarts, HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* C = alpha * A + beta * B * A and B are assumed to have the same row and column partitionings */ HYPRE_Int hypre_ParcsrAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **Cout ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int nnz_diag_A = A_diag_i[nrow_local]; HYPRE_Int nnz_offd_A = A_offd_i[nrow_local]; /* diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_a = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Complex *B_offd_a = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int num_cols_B_offd = hypre_CSRMatrixNumCols(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_B_offd, HYPRE_MEMORY_HOST); hypre_assert(nrow_global == hypre_ParCSRMatrixGlobalNumRows(B)); hypre_assert(ncol_global == hypre_ParCSRMatrixGlobalNumCols(B)); hypre_assert(nrow_local == hypre_CSRMatrixNumRows(B_diag)); hypre_assert(ncol_local == hypre_CSRMatrixNumCols(B_diag)); HYPRE_Int nnz_diag_B = B_diag_i[nrow_local]; HYPRE_Int nnz_offd_B = B_offd_i[nrow_local]; /* C */ hypre_ParCSRMatrix *C; HYPRE_BigInt *row_starts_C, *col_starts_C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_Int num_cols_C_offd = num_cols_A_offd + num_cols_B_offd; HYPRE_BigInt *col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_C_offd, HYPRE_MEMORY_HOST); HYPRE_Int nnz_diag_C_alloc = nnz_diag_A + nnz_diag_B; HYPRE_Int nnz_offd_C_alloc = nnz_offd_A + nnz_offd_B; HYPRE_Int nnz_diag_C = 0, nnz_offd_C = 0; HYPRE_Int *C_diag_i = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, 
HYPRE_MEMORY_HOST); HYPRE_Int *C_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag_C_alloc, HYPRE_MEMORY_HOST); HYPRE_Complex *C_diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag_C_alloc, HYPRE_MEMORY_HOST); HYPRE_Int *C_offd_i = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); HYPRE_Int *C_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd_C_alloc, HYPRE_MEMORY_HOST); HYPRE_Complex *C_offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd_C_alloc, HYPRE_MEMORY_HOST); hypre_union2( num_cols_A_offd, col_map_offd_A, num_cols_B_offd, col_map_offd_B, &num_cols_C_offd, col_map_offd_C, A2C_offd, B2C_offd ); HYPRE_Int *marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); HYPRE_Int *marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_C_offd, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } for (i = 0; i < num_cols_C_offd; i++) { marker_offd[i] = -1; } /* main loop for each row i */ for (i = 0; i < nrow_local; i++) { HYPRE_Int diag_i_start = nnz_diag_C; HYPRE_Int offd_i_start = nnz_offd_C; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_C; C_diag_j[nnz_diag_C] = col; C_diag_a[nnz_diag_C] = alpha * val; nnz_diag_C ++; } else { /* this should not happen */ hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n", __FILE__, __func__, __LINE__); } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { HYPRE_Int col = B_diag_j[j]; HYPRE_Complex val = B_diag_a[j]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_C; C_diag_j[nnz_diag_C] = col; C_diag_a[nnz_diag_C] = beta * val; nnz_diag_C ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(C_diag_j[p] == col); C_diag_a[p] += beta * val; } } C_diag_i[i+1] = nnz_diag_C; if (num_procs <= 1) { continue; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Int colA = A_offd_j[j]; HYPRE_Int colC = A2C_offd[colA]; HYPRE_Complex val = A_offd_a[j]; if (marker_offd[colC] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_offd[colC] = nnz_offd_C; C_offd_j[nnz_offd_C] = colC; C_offd_a[nnz_offd_C] = alpha * val; nnz_offd_C ++; } else { /* this should not happen */ hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n", __FILE__, __func__, __LINE__); } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { HYPRE_Int colB = B_offd_j[j]; HYPRE_Int colC = B2C_offd[colB]; HYPRE_Complex val = B_offd_a[j]; if (marker_offd[colC] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_offd[colC] = nnz_offd_C; C_offd_j[nnz_offd_C] = colC; C_offd_a[nnz_offd_C] = beta * val; nnz_offd_C ++; } else { /* existing entry, update */ HYPRE_Int p = marker_offd[colC]; hypre_assert(C_offd_j[p] == colC); C_offd_a[p] += beta * val; } } C_offd_i[i+1] = nnz_offd_C; } #ifdef HYPRE_NO_GLOBAL_PARTITION j = 2; #else j = num_procs + 1; #endif row_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); col_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); memcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt)); memcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt)); /* Now, we should have everything of Parcsr matrix C */ C = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, row_starts_C, col_starts_C, num_cols_C_offd, nnz_diag_C, nnz_offd_C); C_diag = 
hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_a; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixData(C_offd) = C_offd_a; hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *Cout = C; /* done */ hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; }
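The row-merge loop above never clears its marker array between rows; it instead compares each marker value against the nnz count recorded at the start of the row. A minimal standalone sketch of the same technique on plain CSR arrays (names are hypothetical; the hypre routine additionally handles the offd block, MPI metadata, and allocation):

/* Sketch of the marker-based merge: C = alpha*A + beta*B for two CSR matrices
 * sharing a row count.  Cj/Ca must be sized by the caller for
 * nnz(A) + nnz(B) in the worst case. */
#include <stdio.h>
#include <stdlib.h>

static void csr_add(int nrows, int ncols,
                    double alpha, const int *Ai, const int *Aj, const double *Aa,
                    double beta,  const int *Bi, const int *Bj, const double *Ba,
                    int *Ci, int *Cj, double *Ca)
{
    int *marker = malloc(ncols * sizeof(int));
    int i, j, nnz = 0;
    for (i = 0; i < ncols; i++) { marker[i] = -1; }
    Ci[0] = 0;
    for (i = 0; i < nrows; i++)
    {
        int row_start = nnz; /* marker[col] < row_start <=> col unseen in row i */
        for (j = Ai[i]; j < Ai[i+1]; j++)
        {
            marker[Aj[j]] = nnz;
            Cj[nnz] = Aj[j];
            Ca[nnz++] = alpha * Aa[j];
        }
        for (j = Bi[i]; j < Bi[i+1]; j++)
        {
            if (marker[Bj[j]] < row_start) /* new entry */
            {
                marker[Bj[j]] = nnz;
                Cj[nnz] = Bj[j];
                Ca[nnz++] = beta * Ba[j];
            }
            else /* entry already created by A: accumulate */
            {
                Ca[marker[Bj[j]]] += beta * Ba[j];
            }
        }
        Ci[i+1] = nnz;
    }
    free(marker);
}

int main(void)
{
    /* 1x2 example: A = [1 2], B = [0 3]  =>  C = [1 5] */
    int    Ai[] = {0, 2}, Aj[] = {0, 1};  double Aa[] = {1.0, 2.0};
    int    Bi[] = {0, 1}, Bj[] = {1};     double Ba[] = {3.0};
    int    Ci[2], Cj[3];                  double Ca[3];
    csr_add(1, 2, 1.0, Ai, Aj, Aa, 1.0, Bi, Bj, Ba, Ci, Cj, Ca);
    printf("nnz = %d, C(0,%d) = %g, C(0,%d) = %g\n",
           Ci[1], Cj[0], Ca[0], Cj[1], Ca[1]);
    return 0;
}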
GB_binop__bget_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bget_int32
// A.*B function (eWiseMult):       GB_AemultB__bget_int32
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bget_int32
// C+=b function (dense accum):     GB_Cdense_accumb__bget_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bget_int32
// C=scalar+B                       GB_bind1st__bget_int32
// C=scalar+B'                      GB_bind1st_tran__bget_int32
// C=A+scalar                       GB_bind2nd__bget_int32
// C=A'+scalar                      GB_bind2nd_tran__bget_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITGET (x, y, int32_t, 32) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bget_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bget_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bget_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__bget_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \ } GrB_Info 
GB_bind1st_tran__bget_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
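All of the kernels above funnel into the GB_BITGET macro. As a rough reference, here is a plain-C sketch of the bit-extract semantics documented for the GxB_BGET_* operators (z is bit y of x, with y counted from 1); this is an assumption about the macro's behavior, not its verbatim definition from GB.h:

#include <stdint.h>
#include <stdio.h>

/* Sketch of GB_BITGET (x, y, int32_t, 32) semantics: return bit y of x,
 * with y counted from 1; out-of-range y is assumed to yield 0 here. */
static int32_t bitget_int32(int32_t x, int32_t y)
{
    if (y < 1 || y > 32) return 0;                 /* assumed edge-case policy */
    return (int32_t) (((uint32_t) x >> (y - 1)) & 1u);
}

int main(void)
{
    printf("%d\n", bitget_int32(5, 1));   /* 0b101, bit 1 -> 1 */
    printf("%d\n", bitget_int32(5, 2));   /* 0b101, bit 2 -> 0 */
    printf("%d\n", bitget_int32(5, 3));   /* 0b101, bit 3 -> 1 */
    return 0;
}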
omp_threadprivate.c
// RUN: %libomp-compile-and-run /* * Threadprivate is tested in 2 ways: * 1. The global variable declared as threadprivate should have * local copy for each thread. Otherwise race condition and * wrong result. * 2. If the value of local copy is retained for the two adjacent * parallel regions */ #include "omp_testsuite.h" #include <stdlib.h> #include <stdio.h> static int sum0=0; static int myvalue = 0; #pragma omp threadprivate(sum0) #pragma omp threadprivate(myvalue) int test_omp_threadprivate() { int sum = 0; int known_sum; int i; int iter; int *data; int size; int num_failed = 0; int my_random; omp_set_dynamic(0); #pragma omp parallel private(i) { sum0 = 0; #pragma omp for for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; } /*end of for*/ #pragma omp critical { sum = sum + sum0; } /*end of critical */ } /* end of parallel */ known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; if (known_sum != sum ) { fprintf (stderr, " known_sum = %d, sum = %d\n", known_sum, sum); } /* the next parallel region is just used to get the number of threads*/ omp_set_dynamic(0); #pragma omp parallel { #pragma omp master { size=omp_get_num_threads(); data=(int*) malloc(size*sizeof(int)); } }/* end parallel*/ srand(45); for (iter = 0; iter < 100; iter++) { my_random = rand(); /* random number generator is called inside serial region*/ /* the first parallel region is used to initialize myvalue and the array with my_random+rank */ #pragma omp parallel { int rank; rank = omp_get_thread_num (); myvalue = data[rank] = my_random + rank; } /* the second parallel region verifies that the value of "myvalue" is retained */ #pragma omp parallel reduction(+:num_failed) { int rank; rank = omp_get_thread_num (); num_failed = num_failed + (myvalue != data[rank]); if(myvalue != data[rank]) { fprintf (stderr, " myvalue = %d, data[rank]= %d\n", myvalue, data[rank]); } } } free (data); return (known_sum == sum) && !num_failed; } /* end of check_threadprivate*/ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_threadprivate()) { num_failed++; } } return num_failed; }
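The test above verifies two properties at once; the second (each thread's threadprivate copy persisting across consecutive parallel regions, given dynamic threads are disabled) is easy to lose in the noise. A stripped-down sketch of just that check, with illustrative names:

#include <omp.h>
#include <stdio.h>

static int tp = 0;
#pragma omp threadprivate(tp)

int main(void)
{
    int ok = 1;
    omp_set_dynamic(0);        /* keep the thread team size stable */
    #pragma omp parallel
    {
        tp = 100 + omp_get_thread_num();   /* first region: set each copy */
    }
    #pragma omp parallel reduction(&&: ok)
    {
        /* second region: each thread should still see its own value */
        ok = ok && (tp == 100 + omp_get_thread_num());
    }
    printf(ok ? "threadprivate values persisted\n" : "values lost\n");
    return !ok;
}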
GB_binop__le_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__le_int16 // A.*B function (eWiseMult): GB_AemultB__le_int16 // A*D function (colscale): GB_AxD__le_int16 // D*A function (rowscale): GB_DxB__le_int16 // C+=B function (dense accum): GB_Cdense_accumB__le_int16 // C+=b function (dense accum): GB_Cdense_accumb__le_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int16 // C=scalar+B GB_bind1st__le_int16 // C=scalar+B' GB_bind1st_tran__le_int16 // C=A+scalar GB_bind2nd__le_int16 // C=A'+scalar GB_bind2nd_tran__le_int16 // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__le_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__le_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__le_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__le_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__le_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__le_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__le_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__le_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__le_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB_bind1st_tran__le_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__le_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
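One detail worth noting in the generated file above: LE is not commutative, so bind1st computes (x <= aij) while bind2nd computes (aij <= y), and both kernels must be emitted. A tiny plain-C sketch of the difference:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t Ax[] = { -2, 5, 9 };   /* sample matrix values */
    int16_t s = 5;                 /* the bound scalar */
    for (int p = 0; p < 3; p++)
    {
        bool c1 = (s <= Ax[p]);    /* bind1st: cij = (x <= aij) */
        bool c2 = (Ax[p] <= s);    /* bind2nd: cij = (aij <= y) */
        printf("aij = %2d   bind1st -> %d   bind2nd -> %d\n", Ax[p], c1, c2);
    }
    return 0;
}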
convolution_3x3_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-8a-inch/8a-64-outch/4b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = 
(__fp16)k3.row(q + i)[k]; g00 += 4; } } } } } static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; //size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = 
tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); 
vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = 
kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), 
"1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, 
_k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" 
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == 
top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), 
_tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
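The intrinsics above implement the Winograd F(6x6,3x3) inverse output transform, four output channels at a time. A scalar sketch of the same per-column transform, written directly from the otm[6][8] matrix quoted in the comments (the helper name is illustrative, not part of ncnn):

/*
 * Scalar sketch of the per-column inverse transform done above with NEON
 * fp16 intrinsics: maps the 8 transformed values of one column to the 6
 * output values, following the otm[6][8] matrix in the comments.
 */
static void winograd63_output_transform_col(const float r[8], float out[6])
{
    /* pairwise sums/differences reused across output rows, as in the kernel */
    float t024a = r[1] + r[2], t135a = r[1] - r[2];
    float t024b = r[3] + r[4], t135b = r[3] - r[4];
    float t024c = r[5] + r[6], t135c = r[5] - r[6];

    out[0] = r[0] + t024a + t024b + t024c * 32.f;
    out[1] = t135a + t135b * 2.f + t135c * 16.f;
    out[2] = t024a + t024b * 4.f + t024c * 8.f;
    out[3] = t135a + t135b * 8.f + t135c * 4.f;
    out[4] = t024a + t024b * 16.f + t024c * 2.f;
    out[5] = r[7] + t135a + t135b * 32.f + t135c;
}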
atomic-5.c
/* PR middle-end/36106 */
/* { dg-additional-options "-mcx16" { target { { i?86-*-* x86_64-*-* } && lp64 } } } */

#ifdef __x86_64__
# include "cpuid.h"
#endif

extern void abort (void);

int __attribute__((noinline))
do_test (void)
{
  long double d = .0L;
  int i;
#pragma omp parallel for shared (d)
  for (i = 0; i < 10; i++)
#pragma omp atomic
    d += 1.0L;
  if (d != 10.0L)
    abort ();
  return 0;
}

int
main (void)
{
#ifdef __x86_64__
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 0;
  if (!(ecx & bit_CMPXCHG16B))
    return 0;
#endif
  do_test ();
  return 0;
}
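The test deliberately stresses `#pragma omp atomic` on a 16-byte `long double`, which on x86-64 needs CMPXCHG16B (hence the -mcx16 option and the cpuid guard). For comparison, a minimal sketch of the same accumulation written with a reduction clause, which avoids the per-iteration atomic entirely:

/* Same computation via a reduction clause; sketch, not part of the GCC test. */
extern void abort (void);

int
do_test_reduction (void)
{
  long double d = .0L;
  int i;
#pragma omp parallel for reduction (+:d)
  for (i = 0; i < 10; i++)
    d += 1.0L;
  if (d != 10.0L)
    abort ();
  return 0;
}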
ThreadSafeVec.h
//
//  smarties
//  Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
//  Distributed under the terms of the MIT license.
//
//  Created by Guido Novati (novatig@ethz.ch).
//

#ifndef smarties_ThreadSafeVec_h
#define smarties_ThreadSafeVec_h

#include "../Settings/Definitions.h"
#include <cassert>
#include <memory>
#include <vector>
#include <omp.h>

namespace smarties
{

template<typename T>
struct THRvec
{
  Uint nThreads;
  const T initial;
  std::vector<std::unique_ptr<T>> m_v;

  THRvec(const Uint size, const T init = T()) : nThreads(size), initial(init)
  {
    m_v.resize(nThreads);
    // one iteration per thread: each thread constructs its own slot
    #pragma omp parallel for num_threads(nThreads) schedule(static, 1)
    for(Uint i=0; i<nThreads; ++i) m_v[i] = std::make_unique<T>(initial);
  }
  THRvec(const THRvec& c) = delete;

  void resize(const Uint N)
  {
    if(N == nThreads) return;
    m_v.resize(N);
    nThreads = N;
    #pragma omp parallel for schedule(static, 1)
    for(Uint i=0; i<N; ++i) {
      if(m_v[i]) continue;
      m_v[i] = std::make_unique<T>(initial);
    }
  }

  Uint size() const { return nThreads; }

  T& operator[] (const Uint i) const
  {
    assert(m_v[i]);
    return * m_v[i].get();
  }
};

} // end namespace smarties
#endif // smarties_ThreadSafeVec_h
Example_target_task_reduction.2b.c
/*
 * @@name:       target_task_reduction.2b.c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   yes
 * @@expect:     success
 * @@version:    omp_5.0
 */
#include <stdio.h>

extern void device_compute(int *);
// device_compute is declared before being listed in the declare target directive
#pragma omp declare target to(device_compute)
extern void host_compute(int *);

int main()
{
   int sum = 0;

   #pragma omp parallel master reduction(task, +:sum)
   {
      #pragma omp target in_reduction(+:sum) nowait
         device_compute(&sum);

      host_compute(&sum);
   }
   printf( "sum = %d\n", sum);
   //OUTPUT: sum = 2
   return 0;
}

void device_compute(int *sum){ *sum = 1; }
void   host_compute(int *sum){ *sum = 1; }
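The key point of the example is that the deferred target task joins the enclosing parallel region's task reduction through `in_reduction`. A host-only sketch of the same pattern with an ordinary explicit task (illustrative, not part of the official example set):

/* Host-only analogue: an explicit task contributes to the parallel
 * region's reduction(task, +:sum) via in_reduction. */
#include <stdio.h>

int host_only_analogue(void)
{
   int sum = 0;

   #pragma omp parallel master reduction(task, +:sum)
   {
      #pragma omp task in_reduction(+:sum)
      sum += 1;     /* stands in for device_compute */

      sum += 1;     /* stands in for host_compute */
   }
   printf("sum = %d\n", sum);   /* prints: sum = 2 */
   return 0;
}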
GB_bitmap_assign_M_row_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_M_row_template: traverse M for GB_ROW_ASSIGN
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// M is a 1-by-(C->vdim) hypersparse or sparse matrix, not a vector, for
// GrB_Row_assign (if C is CSC) or GrB_Col_assign (if C is CSR).
// C is bitmap/full.  M is sparse/hyper, and can be jumbled.

{
    ASSERT (mvlen == 1) ;
    int64_t iC = I [0] ;
    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over M (0,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(0,k) for this task
            //------------------------------------------------------------------

            int64_t jM = GBH (Mh, k) ;
            int64_t pM_start, pM_end ;
            GB_get_pA (&pM_start, &pM_end, tid, k, kfirst, klast,
                pstart_Mslice, Mp, mvlen) ;

            //------------------------------------------------------------------
            // traverse over M(0,jM), the kth vector of M
            //------------------------------------------------------------------

            // for row_assign: M is a single row, iC = I [0]
            // It has either 0 or 1 entry.
            int64_t pM = pM_start ;
            if (pM < pM_end)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    int64_t jC = jM ;
                    int64_t pC = iC + jC * cvlen ;
                    GB_MASK_WORK (pC) ;
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
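The template relies on the addressing scheme for bitmap/full matrices: C is held by column, so entry C(i,j) lives at linear position i + j*cvlen, and since the row-assign mask has a single row, each of its vectors jM touches exactly one position pC = iC + jM*cvlen. A standalone illustration (not GraphBLAS API):

/* Addressing sketch for the bitmap/full case used above. */
#include <assert.h>
#include <stdint.h>

static inline int64_t bitmap_index (int64_t i, int64_t j, int64_t cvlen)
{
    return (i + j * cvlen) ;
}

void bitmap_index_check (void)
{
    /* with cvlen = 5, row iC = 2: column 0 -> 2, column 3 -> 17 */
    assert (bitmap_index (2, 0, 5) == 2) ;
    assert (bitmap_index (2, 3, 5) == 17) ;
}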
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(4*t3+Nx-9,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),8*t4+6);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 
% 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
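The CLooG-generated nest above time-tiles the stencil, which buries the actual update. A readable single-point form of the same kernel, with the coefficients and the two-slot time buffer taken from the file (the function name is illustrative):

/*
 * Readable form of the generated update: an order-4 (25-point) star stencil
 * with two time slots.  The generated code applies exactly this formula
 * under a shifted, tiled iteration space.
 */
static const double coef[5] =
  { -0.28472, 0.16000, -0.02000, 0.00254, -0.00018 };

static void update_point(double ****A, double ***roc2,
                         int t, int i, int j, int k)
{
  double lap = coef[0] * A[t % 2][i][j][k];
  for (int r = 1; r <= 4; r++)   /* six axis neighbors at each radius */
    lap += coef[r] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]
                    + A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]
                    + A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
  A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k]
                          - A[(t + 1) % 2][i][j][k]
                          + roc2[i][j][k] * lap;
}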
GB_binop__lt_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp32) // A*D function (colscale): GB (_AxD__lt_fp32) // D*A function (rowscale): GB (_DxB__lt_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__lt_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__lt_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp32) // C=scalar+B GB (_bind1st__lt_fp32) // C=scalar+B' GB (_bind1st_tran__lt_fp32) // C=A+scalar GB (_bind2nd__lt_fp32) // C=A'+scalar GB (_bind2nd_tran__lt_fp32) // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A 
is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
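On the GB_BINOP_FLIP machinery above: LT is not commutative, but evaluating it with swapped inputs coincides with GT on the original operand order, so the flip is resolved upstream and GB_BINOP_FLIP is 0 here. A standalone sketch of that identity (not GraphBLAS API):

#include <stdbool.h>

static inline bool lt_fp32 (float x, float y) { return (x < y) ; }
static inline bool gt_fp32 (float x, float y) { return (x > y) ; }

/* flipped apply: f(y,x); for LT this is gt_fp32(x,y) for all inputs,
   including NaN (both comparisons are then false) */
static inline bool lt_fp32_flipped (float x, float y) { return lt_fp32 (y, x) ; }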
GB_unop__abs_int32_int32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__abs_int32_int32
// op(A') function:  GB_unop_tran__abs_int32_int32

// C type:   int32_t
// A type:   int32_t
// cast:     int32_t cij = aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int32_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = aij ;               \
    Cx [pC] = GB_IABS (z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__abs_int32_int32
(
    int32_t *Cx,            // Cx and Ax may be aliased
    const int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        int32_t z = aij ;
        Cx [p] = GB_IABS (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__abs_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
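For reference, the apply kernel above with the codegen macros expanded by hand, assuming GB_IABS is the usual branch-based integer absolute value (the function name is hypothetical, not part of GraphBLAS):

#include <stdint.h>

void abs_int32_apply_expanded (int32_t *Cx, const int32_t *Ax,
                               int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= 0) ? Ax [p] : -Ax [p] ;
    }
}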
omp_smithW-v6.2-target-inlined.c
/********************************************************************************* * Smith–Waterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * * Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode * gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run * Execution: ./omp_smithW <number_of_col> <number_of_rows> * * Updated by C. Liao, Jan 2nd, 2019 *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <time.h> #include <assert.h> #include <stdbool.h> // C99 does not support the boolean data type #include "parameters.h" /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? a : b) // #define DEBUG /* End of Helpers */ #ifndef _OPENMP #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double omp_get_wtime() { return time_stamp(); } #endif /*-------------------------------------------------------------------- * Functions Prototypes */ #pragma omp declare target //Defines size of strings to be compared long long int m = 8 ; //Columns - Size of string a long long int n = 9; //Lines - Size of string b int gapScore = -2; //Defines scores int matchScore = 3; int missmatchScore = -3; //Strings over the Alphabet Sigma char *a, *b; int matchMissmatchScore(long long int i, long long int j); void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos); #pragma omp end declare target // without omp critical: how to conditionalize it? void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos); void backtrack(int* P, long long int maxPos); void printMatrix(int* matrix); void printPredecessorMatrix(int* matrix); void generate(void); long long int nElement(long long int i); void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ bool useBuiltInData=true; // the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s. 
/* End of global variables */ /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { // thread_count is no longer used int thread_count; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF); //#endif //Allocates a and b a = (char*) malloc(m * sizeof(char)); // printf ("debug: a's address=%p\n", a); b = (char*) malloc(n * sizeof(char)); // printf ("debug: b's address=%p\n", b); //Because now we have zeros m++; n++; //Allocates similarity matrix H int *H; H = (int *) calloc(m * n, sizeof(int)); // printf ("debug: H's address=%p\n", H); //Allocates predecessor matrix P int *P; P = (int *)calloc(m * n, sizeof(int)); // printf ("debug: P's address=%p\n", P); unsigned long long sz = (m+n +2*m*n)*sizeof(int)/1024/1024; if (sz>=1024) printf("Total memory footprint is:%llu GB\n", sz/1024) ; else printf("Total memory footprint is:%llu MB\n", sz); if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // OBS: m=11 n=7 // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } //Start position for backtrack long long int maxPos = 0; //Calculates the similarity matrix long long int i, j; // The way to generate all wavefront is to go through the top edge elements // starting from the left top of the matrix, go to the bottom top -> down, then left->right // total top edge element count = dim1_size + dim2_size -1 //Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; #ifdef DEBUG printf("nDiag=%lld\n", nDiag); printf("Number of wavefront lines and their first element positions:\n"); #endif #ifdef _OPENMP #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); printf ("Using %d out of max %d threads...\n", thread_count, omp_get_max_threads()); } } // detect GPU support int runningOnGPU = 0; printf ("The number of target devices =%d\n", omp_get_num_devices()); /* Test if GPU is available using OpenMP4.5 */ #pragma omp target map(from:runningOnGPU) { // This function returns true if currently running on the host device, false otherwise. if (!omp_is_initial_device()) runningOnGPU = 1; } /* If still running on CPU, GPU must not be available */ if (runningOnGPU == 1) printf("### Able to use the GPU! ### \n"); else printf("### Unable to use the GPU, using CPU! ###\n"); #endif //Gets Initial time double initialTime = omp_get_wtime(); // mistake: element count, not byte size!! 
// int asz= m*n*sizeof(int); int asz= m*n; // choice 2: map data before the outer loop #pragma omp target map (to:a[0:m], b[0:n], nDiag, m,n,gapScore, matchScore, missmatchScore) map(tofrom: H[0:asz], P[0:asz], maxPos) // #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i) { for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding { long long int nEle, si, sj; // nEle = nElement(i); //---------------inlined ------------ if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing nEle = i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size nEle = min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); nEle = 2 * min - i + llabs(m - n) - 2; } //calcFirstDiagElement(i, &si, &sj); //------------inlined--------------------- // Calculate the first element of diagonal if (i < n) { // smaller than row count si = i; sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix si = n - 1; // i is fixed sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } //-------------------------------------- { // choice 1: map data before the inner loop //#pragma omp target device(0) map (to:a[0:m], b[0:n], nEle, m,n,gapScore, matchScore, missmatchScore, si, sj) map(tofrom: H[0:asz], P[0:asz], maxPos) #pragma omp parallel for default(none) private(j) shared (a,b, nEle, m, n, gapScore, matchScore, missmatchScore, si, sj, H, P, maxPos) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal ///------------inlined ------------------------------------------ // similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside { int up, left, diag; //Stores index of element long long int index = m * ai + aj; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal int t_mms; if (a[aj - 1] == b[ai - 1]) t_mms = matchScore; else t_mms = missmatchScore; diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j); // degug here // return; //Calculates the maximum int max = NONE; int pred = NONE; if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[maxPos]) { #pragma omp critical maxPos = index; } } // --------------------------------------------------------------- } } } // for end nDiag } // end omp parallel double finalTime = omp_get_wtime(); printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime); initialTime = omp_get_wtime(); backtrack(P, maxPos); finalTime = omp_get_wtime(); //Gets backtrack time finalTime = omp_get_wtime(); printf("Elapsed time for backtracking: %f\n", finalTime - initialTime); #ifdef DEBUG printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); #endif if (useBuiltInData) { printf ("Verifying results using the builtinIn data: %s\n", 
(H[n*m-1]==7)?"true":"false"); assert (H[n*m-1]==7); } //Frees similarity matrixes free(H); free(P); //Frees input arrays free(a); free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal's elements * i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored. */ long long int nElement(long long int i) { if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size return min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); return 2 * min - i + llabs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding * Purpose: Calculate the position of (si, sj)-element * n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { // smaller than row count *si = i; *sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix *si = n - 1; // i is fixed *sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } } /* // understanding the calculation by an example n =6 // row m =2 // col padded scoring matrix n=7 m=3 0 1 2 ------- 0 x x x 1 x x x 2 x x x 3 x x x 4 x x x 5 x x x 6 x x x We should peel off top row and left column since they are the padding the remaining 6x2 sub matrix is what is interesting for us Now find the number of wavefront lines and their first element's position in the scoring matrix total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1 We use the left most element in each wavefront line as its first element. Then we have the first elements like (1,1), (2,1) (3,1) .. (6,1) (6,2) */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j) * int *P; the predecessor array,storing which of the three elements is picked with max value */ #pragma omp declare target void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal int t_mms; if (a[j - 1] == b[i - 1]) t_mms = matchScore; else t_mms = missmatchScore; diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j); // degug here // return; //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... 
* b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { #pragma omp critical *maxPos = index; } } /* End of similarityScore */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ int matchMissmatchScore(long long int i, long long int j) { if (a[j - 1] == b[i - 1]) return matchScore; else return missmatchScore; } /* End of matchMissmatchScore */ #pragma omp end declare target void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal diag = H[index - m - 1] + matchMissmatchScore(i, j); //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { *maxPos = index; } } /* End of similarityScore2 */ /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ void backtrack(int* P, long long int maxPos) { //hold maxPos value long long int predPos; //backtrack from maxPos to startPos = 0 do { if (P[maxPos] == DIAGONAL) predPos = maxPos - m - 1; else if (P[maxPos] == UP) predPos = maxPos - m; else if (P[maxPos] == LEFT) predPos = maxPos - 1; P[maxPos] *= PATH; maxPos = predPos; } while (P[maxPos] != NONE); } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ /*-------------------------------------------------------------------- * Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(int* matrix) { long long int i, j, index; printf(" "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if 
(matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
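A quick check of the wavefront bookkeeping documented above, using the padded n = 7 (rows), m = 3 (columns) example from the comments: it should print first elements (1,1), (2,1), ..., (6,1), (6,2) with element counts 1, 2, 2, 2, 2, 2, 1. Standalone sketch with local copies of the helpers:

#include <stdio.h>
#include <stdlib.h>

#define MAXV(a,b) ((a) > (b) ? (a) : (b))
#define MINV(a,b) ((a) < (b) ? (a) : (b))

int wavefront_check(void)
{
    long long m = 3, n = 7;              /* padded sizes, as in the example */
    long long nDiag = m + n - 3;
    for (long long i = 1; i <= nDiag; ++i) {
        long long nEle, si, sj;
        /* same case analysis as nElement() */
        if (i < m && i < n)       nEle = i;
        else if (i < MAXV(m, n))  nEle = MINV(m, n) - 1;
        else                      nEle = 2 * MINV(m, n) - i + llabs(m - n) - 2;
        /* same case analysis as calcFirstDiagElement() */
        if (i < n) { si = i;     sj = 1;         }
        else       { si = n - 1; sj = i - n + 2; }
        printf("diag %lld: first=(%lld,%lld), %lld elements\n", i, si, sj, nEle);
    }
    return 0;
}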
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
          0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  dependent on which method you specify.
%
%  The format of the AutoThresholdImage method is:
%
%       MagickBooleanType AutoThresholdImage(Image *image,
%         const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
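    Kapur's criterion: treat the histogram bins at or below a candidate
    threshold t and those above it as two distributions and pick the t that
    maximizes the sum of their Shannon entropies,
    black_entropy[t]+white_entropy[t].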
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram, ExceptionInfo *exception) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ (void) exception; start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
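    Triangle method: draw a line from the histogram peak (max,histogram[max])
    to the far end of the populated range; the threshold is the bin whose
    histogram point lies at the greatest perpendicular distance from that
    line.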
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than the given value is set to its maximum or QuantumRange.
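%
%  For example, a hypothetical caller thresholding at 50% intensity (image
%  and exception supplied by the caller):
%
%      (void) BilevelImage(image,0.5*QuantumRange,exception);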
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as this operator using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
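    With the default channel setting the comparison uses the pixel intensity
    for every updated channel; when a non-default channel mask is set, each
    selected channel is compared against its own threshold component instead.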
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
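    PseudoClass images were handled above by clamping the colormap; here
    DirectClass pixels are clamped in place.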
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap.
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the ThresholdMap to destroy.
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
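%
%  A hypothetical usage sketch ("checks" is one of the built-in maps defined
%  at the top of this file; exception comes from the caller):
%
%      ThresholdMap *map = GetThresholdMap("checks",exception);
%      if (map != (ThresholdMap *) NULL)
%        map=DestroyThresholdMap(map);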
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   + G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement","<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement","<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
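  /*
    Parse the required width, height, and divisor attributes of <levels>.
  */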
attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions 
%  in the given XML file data.
%
%  The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute","<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement","<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent","<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
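%
%  A hypothetical usage sketch (exception supplied by the caller):
%
%      (void) ListThresholdMaps(stdout,exception);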
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n   Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
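%
%  A hypothetical usage sketch ("o8x8" is expected to be defined in the
%  thresholds.xml configuration; image and exception come from the caller):
%
%      (void) OrderedDitherImage(image,"o8x8,4",exception);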
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) 
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() sets each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer); otherwise the pixel value
%  remains unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
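    PerceptibleThreshold() keeps the sign of each value while forcing its
    magnitude to at least epsilon, so later computations never see values
    that are effectively zero.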
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o min_threshold, max_threshold: Specify the minimum and maximum
%      thresholds.  These values range from 0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold,const double max_threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  GetPixelInfo(image,&threshold);
  /*
    Random threshold image.
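    Channel values below min_threshold or above max_threshold are compared
    against the corresponding bound itself, so the result there is
    deterministic; values in between are compared against a uniform random
    threshold drawn from [0,QuantumRange).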
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n g e T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RangeThresholdImage() applies soft and hard thresholding.
%
%  The format of the RangeThresholdImage method is:
%
%      MagickBooleanType RangeThresholdImage(Image *image,
%        const double low_black,const double low_white,const double high_white,
%        const double high_black,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low_black: Define the minimum black threshold value.
%
%    o low_white: Define the minimum white threshold value.
%
%    o high_white: Define the maximum white threshold value.
%
%    o high_black: Define the maximum black threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image.
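    The transfer function is a trapezoid: black below low_black, a linear
    ramp up to white between low_black and low_white, solid white through
    high_white, a linear ramp back down to black between high_white and
    high_black, and black again above high_black.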
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=0; else q[i]=0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
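%
%  A hypothetical usage sketch (an 80% threshold; image and exception come
%  from the caller):
%
%      (void) WhiteThresholdImage(image,"80%",exception);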
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
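/*
  A hypothetical, self-contained usage sketch (not part of MagickCore): a
  small program that reads an image, applies Otsu auto-thresholding, and
  writes the result.  It assumes linkage against MagickCore; the file names
  are placeholders taken from the command line.

      #include "MagickCore/MagickCore.h"

      int main(int argc,char **argv)
      {
        ExceptionInfo
          *exception;

        Image
          *image;

        ImageInfo
          *image_info;

        if (argc != 3)
          return(1);
        MagickCoreGenesis(*argv,MagickTrue);
        exception=AcquireExceptionInfo();
        image_info=CloneImageInfo((ImageInfo *) NULL);
        (void) CopyMagickString(image_info->filename,argv[1],
          MagickPathExtent);
        image=ReadImage(image_info,exception);
        if (image != (Image *) NULL)
          {
            (void) AutoThresholdImage(image,OTSUThresholdMethod,exception);
            (void) CopyMagickString(image->filename,argv[2],MagickPathExtent);
            (void) WriteImage(image_info,image,exception);
            image=DestroyImage(image);
          }
        image_info=DestroyImageInfo(image_info);
        exception=DestroyExceptionInfo(exception);
        MagickCoreTerminus();
        return(0);
      }
*/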
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/fourier.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { PixelChannel channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
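%
%  A hypothetical usage sketch (images is assumed to be a sequence of at
%  least two images holding the real and imaginary components):
%
%      Image *magnitude_phase = ComplexImages(images,
%        MagnitudePhaseComplexOperator,exception);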
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; size_t columns, number_channels, rows; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images->next,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } image->depth=32UL; AppendImageToList(&complex_images,image); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { complex_images=DestroyImageList(complex_images); return(complex_images); } /* Apply complex mathematics to image pixels. */ artifact=GetImageArtifact(images,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; number_channels=MagickMin(MagickMin(MagickMin( Ar_image->number_channels,Ai_image->number_channels),MagickMin( Br_image->number_channels,Bi_image->number_channels)),MagickMin( Cr_image->number_channels,Ci_image->number_channels)); Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; columns=MagickMin(Cr_image->columns,Ci_image->columns); rows=MagickMin(Cr_image->rows,Ci_image->rows); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(Cr_image,complex_images,rows,1L) #endif for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; Quantum *magick_restrict Ci, *magick_restrict Cr; ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,columns,1,exception); 
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,columns,1,exception); if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) || (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) || (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) number_channels; i++) { switch (op) { case AddComplexOperator: { Cr[i]=Ar[i]+Br[i]; Ci[i]=Ai[i]+Bi[i]; break; } case ConjugateComplexOperator: default: { Cr[i]=Ar[i]; Ci[i]=(-Ai[i]); break; } case DivideComplexOperator: { double gamma; gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br[i]*Br[i]+ QuantumScale*Bi[i]*Bi[i]+snr); Cr[i]=gamma*(QuantumScale*Ar[i]*Br[i]+QuantumScale*Ai[i]*Bi[i]); Ci[i]=gamma*(QuantumScale*Ai[i]*Br[i]-QuantumScale*Ar[i]*Bi[i]); break; } case MagnitudePhaseComplexOperator: { Cr[i]=sqrt(QuantumScale*Ar[i]*Ar[i]+QuantumScale*Ai[i]*Ai[i]); Ci[i]=atan2((double) Ai[i],(double) Ar[i])/(2.0*MagickPI)+0.5; break; } case MultiplyComplexOperator: { Cr[i]=(QuantumScale*Ar[i]*Br[i]-QuantumScale*Ai[i]*Bi[i]); Ci[i]=(QuantumScale*Ai[i]*Br[i]+QuantumScale*Ar[i]*Bi[i]); break; } case RealImaginaryComplexOperator: { Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5)); Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5)); break; } case SubtractComplexOperator: { Cr[i]=Ar[i]-Br[i]; Ci[i]=Ai[i]-Bi[i]; break; } } } Ar+=GetPixelChannels(Ar_image); Ai+=GetPixelChannels(Ai_image); Br+=GetPixelChannels(Br_image); Bi+=GetPixelChannels(Bi_image); Cr+=GetPixelChannels(Cr_image); Ci+=GetPixelChannels(Ci_image); } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels) { double *source_pixels; MemoryInfo *source_info; ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). 
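    Each destination coordinate is the source coordinate shifted by the
    given offset with wrap-around at the image edge; for width=4 and
    x_offset=2, columns 0,1,2,3 land at 2,3,0,1, and rows are treated the
    same way with y_offset.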
*/ source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return(MagickFalse); source_pixels=(double *) GetVirtualMemoryBlob(source_info); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width : x+x_offset; source_pixels[v*width+u]=roll_pixels[i++]; } } (void) memcpy(roll_pixels,source_pixels,height*width*sizeof(*source_pixels)); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source_pixels,double *forward_pixels) { MagickBooleanType status; ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[width/2L-x-1L]=source_pixels[x+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; Quantum *q; ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
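    The magnitude and phase buffers coming out of the FFTW r2c transform
    cover only the width/2+1 non-redundant columns of each row; the
    quadrant swap below unfolds them into full-width, DC-centered planes
    before the values are scaled and written to the two output images.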
*/ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) memset(magnitude_pixels,0,fourier_info->width* fourier_info->height*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) memset(phase_pixels,0,fourier_info->width* fourier_info->height*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } } i++; q+=GetPixelChannels(magnitude_image); } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } } i++; q+=GetPixelChannels(phase_image); } 
status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; const Quantum *p; ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(image,p); break; } case GreenPixelChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(image,p); break; } case BluePixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(image,p); break; } case BlackPixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlack(image,p); break; } case AlphaPixelChannel: { source_pixels[i]=QuantumScale*GetPixelAlpha(image,p); break; } } i++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize forward transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). 
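    When the modulus flag is set the complex spectrum is converted with
    cabs()/carg(), otherwise its real and imaginary parts are stored
    directly; on builds without <complex.h> the fallback macros defined at
    the top of this file operate on the two-element array form of
    fftw_complex.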
*/
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows :
        image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif

MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ?
extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsImageGray(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayPixelChannel,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->alpha_trait != UndefinedPixelTrait) thread_status=ForwardFourierTransformChannel(image, AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. 
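%
%  The two input images are typically the pair produced by
%  ForwardFourierTransformImage(), and the modulus argument must match the
%  setting used for the forward transform so that the phase image is
%  decoded with the same convention it was encoded with.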
% */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; const Quantum *p; ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p); break; } case GreenPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p); break; } case BluePixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p); break; } case BlackPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p); break; } case AlphaPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p); break; } } i++; p+=GetPixelChannels(magnitude_image); } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); 
if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p); break; } case GreenPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p); break; } case BluePixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p); break; } case BlackPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p); break; } case AlphaPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p); break; } } i++; p+=GetPixelChannels(phase_image); } } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; Quantum *q; ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. 
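    FFTW transforms are unnormalized, so a forward/inverse round trip
    scales every sample by width*height; exactly one direction must divide
    by that factor. The forward pass normalizes by default, and setting
    the "fourier:normalize" artifact to "inverse" moves the division here
    instead.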
*/ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? image->columns : fourier_info->width,1UL,exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BluePixelChannel: { SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BlackPixelChannel: { SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case AlphaPixelChannel: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } } i++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsImageGray(magnitude_image); if (is_gray != MagickFalse) is_gray=IsImageGray(phase_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayPixelChannel,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) 
status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->alpha_trait != UndefinedPixelTrait) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
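/*
  A minimal, self-contained sketch of the FFTW calling pattern used above
  (r2c forward, c2r inverse, normalization applied on the forward pass).
  It is illustrative only and independent of MagickCore; the 8x8 size and
  the ramp test signal are arbitrary choices. Build with:
  gcc demo.c -lfftw3 -lm
*/
#include <stdio.h>
#include <fftw3.h>

int main(void)
{
  const int width = 8, height = 8, center = width/2+1;
  double *pixels = fftw_alloc_real((size_t) width*height);
  fftw_complex *freq = fftw_alloc_complex((size_t) height*center);
  fftw_plan forward, inverse;
  int i;

  for (i = 0; i < width*height; i++)
    pixels[i] = (i % width)/(double) width;  /* simple horizontal ramp */
  forward = fftw_plan_dft_r2c_2d(height, width, pixels, freq, FFTW_ESTIMATE);
  fftw_execute(forward);
  /* FFTW is unnormalized: scale once so the round trip is the identity */
  for (i = 0; i < height*center; i++)
    {
      freq[i][0] /= width*height;
      freq[i][1] /= width*height;
    }
  inverse = fftw_plan_dft_c2r_2d(height, width, freq, pixels, FFTW_ESTIMATE);
  fftw_execute(inverse);  /* note: the c2r transform destroys freq */
  printf("pixel[1] after round trip: %g (expect 0.125)\n", pixels[1]);
  fftw_destroy_plan(inverse);
  fftw_destroy_plan(forward);
  fftw_free(freq);
  fftw_free(pixels);
  return 0;
}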
main.c
#include "libabl.h" #include "time.h" #include "stdio.h" #include "CL/cl.h" #include "omp.h" typedef struct { float2 pos; double desiredSpeed; float2 velocity; bool isFinished; int leader; int id; float2 target; int envId; }Point; static const type_info Point_info[] = { { TYPE_FLOAT2, offsetof(Point, pos), "pos", true }, { TYPE_FLOAT, offsetof(Point, desiredSpeed), "desiredSpeed", false }, { TYPE_FLOAT2, offsetof(Point, velocity), "velocity", false }, { TYPE_BOOL, offsetof(Point, isFinished), "isFinished", false }, { TYPE_INT, offsetof(Point, leader), "leader", false }, { TYPE_INT, offsetof(Point, id), "id", false }, { TYPE_FLOAT2, offsetof(Point, target), "target", false }, { TYPE_END, sizeof(Point), NULL } }; typedef struct { int mem_start; int mem_end; } env; struct agent_struct { dyn_array agents_Point; dyn_array agents_Point_dbuf; }; struct agent_struct agents; static const agent_info agents_info[] = { { Point_info, offsetof(struct agent_struct, agents_Point), "Point" }, { NULL, 0, NULL } }; double PI = 3.14159; int num_timesteps = 1000; int num_agents = 65536; double T = 0.54; double rho = 0.05; double r = 2.0; double lambda = 2.0; double gamma = 0.35; double n_prime = 3.0; double n = 2.0; double A = 4.5; int aa = 3; double bb = 0.1; double stepTime = 0.25; double W = 1144.87; float2 random_1(float2 min, float2 max) { return float2_create(random_float(min.x, max.x), random_float(min.y, max.y)); } float3 random_2(float3 min, float3 max) { return float3_create(random_float(min.x, max.x), random_float(min.y, max.y), random_float(min.z, max.z)); } double random_3(double max) { return random_float(0, max); } float2 random_4(float2 max) { return random_1(float2_fill(0), max); } float3 random_5(float3 max) { return random_2(float3_fill(0), max); } int randomInt_1(int max) { return random_int(0, max); } double clam(double pos, double min, double max) { return ((pos < min) ? min : ((pos > max) ? max : pos)); } float2 clam_1(float2 pos, float2 min, float2 max) { return float2_create(((pos.x < min.x) ? min.x : ((pos.x > max.x) ? max.x : pos.x)), ((pos.y < min.y) ? min.y : ((pos.y > max.y) ? max.y : pos.y))); } float3 clam_2(float3 pos, float3 min, float3 max) { return float3_create(((pos.x < min.x) ? min.x : ((pos.x > max.x) ? max.x : pos.x)), ((pos.y < min.y) ? min.y : ((pos.y > max.y) ? max.y : pos.y)), ((pos.z < min.z) ? min.z : ((pos.z > max.z) ? max.z : pos.z))); } double clam_3(double pos, double max) { return clam(pos, 0, max); } float2 clam_4(float2 pos, float2 max) { return clam_1(pos, float2_fill(0), max); } float3 clam_5(float3 pos, float3 max) { return clam_2(pos, float3_fill(0), max); } double wraparound(double pos, double max) { return ((pos < 0) ? (max + pos) : ((pos >= max) ? (pos - max) : pos)); } float2 wraparound_1(float2 pos, float2 max) { return float2_create(((pos.x < 0) ? (max.x + pos.x) : ((pos.x >= max.x) ? (pos.x - max.x) : pos.x)), ((pos.y < 0) ? (max.y + pos.y) : ((pos.y >= max.y) ? (pos.y - max.y) : pos.y))); } float3 wraparound_2(float3 pos, float3 max) { return float3_create(((pos.x < 0) ? (max.x + pos.x) : ((pos.x >= max.x) ? (pos.x - max.x) : pos.x)), ((pos.y < 0) ? (max.y + pos.y) : ((pos.y >= max.y) ? (pos.y - max.y) : pos.y)), ((pos.z < 0) ? (max.z + pos.z) : ((pos.z >= max.z) ? 
(pos.z - max.z) : pos.z)));
}

bool is_inside(float2 pos, float2 max) {
	return ((((pos.x >= 0) && (pos.y >= 0)) && (pos.x <= max.x)) && (pos.y <= max.y));
}

bool isSortNeeded(int p1, int p2) {
	if ((p1 > p2)) return true;
	return false;
}

bool isSortNeeded_1(float2 p1, float2 p2) {
	if ((p1.x > p2.x)) return true;
	if ((p1.x == p2.x)) {
		if ((p1.y > p2.y)) {
			return true;
		}
	}
	return false;
}

int dijkstra(int n, int startnode, int endnode) {
	int cost[10000];
	int distance[100];
	int pred[100];
	int visited[100];
	int count = 0;
	int mindistance = 0;
	int nextnode = 0;
	for (int i = 0, _var0 = (int) n; i < _var0; ++i) {
		for (int j = 0, _var1 = (int) n; j < _var1; ++j) {
			if ((((i - j) == 1) || ((j - i) == 1)))
				cost[((i * (int) n) + j)] = 1;
			else
				cost[((i * (int) n) + j)] = 32767;
		}
	}
	for (int i = 0, _var2 = (int) n; i < _var2; ++i) {
		distance[i] = cost[(((int) startnode * (int) n) + i)];
		pred[i] = startnode;
		visited[i] = 0;
	}
	distance[(int) startnode] = 0;
	visited[(int) startnode] = 1;
	count = 1;
	while ((count < ((int) n - 1))) {
		mindistance = 32767;
		for (int i = 0, _var3 = (int) n; i < _var3; ++i) {
			if (((distance[i] < mindistance) && (!(bool) visited[i]))) {
				mindistance = distance[i];
				nextnode = i;
			}
		}
		visited[nextnode] = 1;
		for (int i = 0, _var4 = (int) n; i < _var4; ++i) {
			if ((!(bool) visited[i])) {
				if (((mindistance + cost[((nextnode * (int) n) + i)]) < distance[i])) {
					distance[i] = (mindistance + cost[((nextnode * (int) n) + i)]);
					pred[i] = nextnode;
				}
			}
		}
		count = (count + 1);
	}
	int j = (int) endnode;
	while ((pred[j] != (int) startnode)) {
		j = pred[j];
	}
	return j;
}

Point* coexecution_merge_Point(Point* A0, Point* A1) {
	if ((A0->id == A0->leader)) A0->target = A1->target;
	return A0;
}

int main() {
	double wtime = omp_get_wtime();
	for (int i = 0, _var5 = num_agents; i < _var5; ++i) {
		*DYN_ARRAY_PLACE(&agents.agents_Point, Point) = (Point) {
			.pos = random_4(float2_fill(W)),
			.desiredSpeed = random_float(0.1, 0.5),
			.velocity = float2_create(0, 0),
			.id = i,
			.leader = (128 * (i / 128)),
			.isFinished = false,
			.target = float2_create(1, 1),
		};
	}
	cl_int ret;
	cl_device_id device_id = NULL;
	cl_uint num_of_platforms;
	cl_uint num_of_devices = 0;
	clGetPlatformIDs(0, NULL, &num_of_platforms);
	cl_platform_id platform_ids[num_of_platforms];
	ret = clGetPlatformIDs(num_of_platforms, platform_ids, NULL);
	cl_command_queue command_queues[2];
	if (!agents.agents_Point_dbuf.values) {
		agents.agents_Point_dbuf = DYN_ARRAY_COPY_FIXED(Point, &agents.agents_Point);
	}
	size_t _var6_Point = sizeof(Point)*agents.agents_Point.len;
	size_t _var6_Point_dbuf = sizeof(Point)*agents.agents_Point.len;
	cl_kernel kernel_Point_0s[2];
	cl_kernel kernel_Point_1s[2];
	cl_kernel st_kernel_Points[2];
	cl_kernel mem_kernel_Points[2];
	cl_kernel upenv_kernel_Points[2];
	cl_mem _var6MemObj_Points[2];
	cl_mem _var6MemObjDbuf_Points[2];
	cl_mem _var6MemObjLen_Points[2];
	cl_mem _var6MemObjEnv_Points[2];
	int activeDevice[2] = {0, 1};
	cl_mem _var6MemObjRng_Points[2];
	char fileName[] = "kernel.cl";
	char *source_str;
	size_t source_size;
	FILE *fp;
	fp = fopen(fileName, "r");
	if (fp == NULL) { /* guard against a missing kernel file before fread */
		fprintf(stderr, "could not open %s\n", fileName);
		return 1;
	}
	source_str = (char*)malloc(0x100000);
	source_size = fread(source_str, 1, 0x100000, fp);
	fclose(fp);
	for (int deviceIter = 0; deviceIter < 2; deviceIter++) {
		int devId = activeDevice[deviceIter];
		/* the last argument receives the number of devices found (cl_uint) */
		ret = clGetDeviceIDs(platform_ids[devId], CL_DEVICE_TYPE_ALL, 1, &device_id, &num_of_devices);
		cl_context_properties contextProperties[] = {
			CL_CONTEXT_PLATFORM,
			(cl_context_properties) platform_ids[devId],
			0
		};
		cl_context context = clCreateContextFromType(contextProperties, CL_DEVICE_TYPE_ALL, NULL, NULL, &ret);
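		/* everything below is set up once per device: its own context, command
		   queue, program build, buffers, and kernel objects, all kept in the
		   *_Points[deviceIter] arrays for the co-execution loop later on */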
cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, 0, &ret); command_queues[deviceIter] = command_queue; cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source_str, NULL, &ret); ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL); char* kernel_name_prefix = "compute_kernel_"; char number[2]; char kernel_name[50]; cl_mem _var6MemObj_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, _var6_Point, NULL , &ret); _var6MemObj_Points[deviceIter] = _var6MemObj_Point; cl_mem _var6MemObjDbuf_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, _var6_Point_dbuf, NULL , &ret); _var6MemObjDbuf_Points[deviceIter] = _var6MemObjDbuf_Point; cl_mem _var6MemObjLen_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(int), NULL , &ret); _var6MemObjLen_Points[deviceIter] = _var6MemObjLen_Point; cl_mem _var6MemObjEnv_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(env)*13225, NULL , &ret); _var6MemObjEnv_Points[deviceIter] = _var6MemObjEnv_Point; cl_uint2 *rngState_Point; rngState_Point = (cl_uint2 *)calloc(agents.agents_Point.len, sizeof(cl_uint2)); for ( int _var7 = 0 ; _var7 < agents.agents_Point.len ; _var7++ ) { cl_uint2 _var8 = { (uint32_t)(_var7 * 2), (uint32_t)(_var7 * 2 + 1) }; rngState_Point[_var7] = _var8; } cl_mem _var6MemObjRng_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(cl_uint2)*agents.agents_Point.len, NULL , &ret); _var6MemObjRng_Points[deviceIter] = _var6MemObjRng_Point; sprintf(number,"%d", deviceIter); sprintf(kernel_name,"%s%s_%s_%s", kernel_name_prefix, number, "Point","0"); cl_kernel kernel_Point_0 = clCreateKernel(program, kernel_name, &ret); kernel_Point_0s[deviceIter] = kernel_Point_0; sprintf(number,"%d", deviceIter); sprintf(kernel_name,"%s%s_%s_%s", kernel_name_prefix, number, "Point","1"); cl_kernel kernel_Point_1 = clCreateKernel(program, kernel_name, &ret); kernel_Point_1s[deviceIter] = kernel_Point_1; cl_kernel st_kernel_Point = clCreateKernel(program, "sorting_Point", &ret); st_kernel_Points[deviceIter] = st_kernel_Point; cl_kernel mem_kernel_Point = clCreateKernel(program, "mem_update_Point", &ret); mem_kernel_Points[deviceIter] = mem_kernel_Point; cl_kernel upenv_kernel_Point = clCreateKernel(program, "update_envId_Point", &ret); upenv_kernel_Points[deviceIter] = upenv_kernel_Point; ret = clSetKernelArg(kernel_Point_0, 0, sizeof(cl_mem), &_var6MemObj_Point); ret |= clSetKernelArg(kernel_Point_0, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point); ret |= clSetKernelArg(kernel_Point_0, 2, sizeof(cl_mem), &_var6MemObjLen_Point); ret |= clSetKernelArg(kernel_Point_0, 3, sizeof(cl_mem), &_var6MemObjEnv_Point); ret |= clSetKernelArg(kernel_Point_0, 4, sizeof(cl_mem), &_var6MemObjRng_Point); ret = clSetKernelArg(kernel_Point_1, 0, sizeof(cl_mem), &_var6MemObj_Point); ret |= clSetKernelArg(kernel_Point_1, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point); ret |= clSetKernelArg(kernel_Point_1, 2, sizeof(cl_mem), &_var6MemObjLen_Point); ret |= clSetKernelArg(kernel_Point_1, 3, sizeof(cl_mem), &_var6MemObjEnv_Point); ret |= clSetKernelArg(kernel_Point_1, 4, sizeof(cl_mem), &_var6MemObjRng_Point); ret = clSetKernelArg(upenv_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point); ret |= clSetKernelArg(upenv_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjLen_Point); ret = clSetKernelArg(st_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point); ret |= clSetKernelArg(st_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point); ret |= clSetKernelArg(st_kernel_Point, 2, sizeof(cl_mem), &_var6MemObjLen_Point); ret 
= clSetKernelArg(mem_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point);
		ret |= clSetKernelArg(mem_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point);
		ret |= clSetKernelArg(mem_kernel_Point, 2, sizeof(cl_mem), &_var6MemObjLen_Point);
		ret |= clSetKernelArg(mem_kernel_Point, 3, sizeof(cl_mem), &_var6MemObjEnv_Point);
		ret = clEnqueueWriteBuffer(command_queue, _var6MemObj_Point, CL_TRUE, 0, _var6_Point, agents.agents_Point.values, 0, NULL, NULL);
		ret = clEnqueueWriteBuffer(command_queue, _var6MemObjDbuf_Point, CL_TRUE, 0, _var6_Point_dbuf, agents.agents_Point_dbuf.values, 0, NULL, NULL);
		ret = clEnqueueWriteBuffer(command_queue, _var6MemObjLen_Point, CL_TRUE, 0, sizeof(int), &agents.agents_Point.len, 0, NULL, NULL);
		ret = clEnqueueWriteBuffer(command_queue, _var6MemObjRng_Point, CL_TRUE, 0, sizeof(cl_uint2)*agents.agents_Point.len, rngState_Point, 0, NULL, NULL);
	}
	size_t localWorkSize = 128;
	Point* _var9buff_Point0 = calloc(agents.agents_Point.len, sizeof(Point));
	Point* _var9buff_Point1 = calloc(agents.agents_Point.len, sizeof(Point));
	Point* _var9buffMerge_Point = calloc(agents.agents_Point.len, sizeof(Point));
	env *_var9Env_Point = calloc(13225, sizeof(env));
	size_t globalWorkSize_Point = roundWorkSizeUp(128, agents.agents_Point.len);
	ret = clEnqueueNDRangeKernel(command_queues[0], upenv_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
	/* bitonic sorting network: one compare-exchange stage per kernel launch */
	for (int length = 1; length < globalWorkSize_Point; length <<= 1)
		for (int inc = length; inc > 0; inc >>= 1) {
			int dir = length << 1;
			clSetKernelArg(st_kernel_Points[0], 3, sizeof(int), &inc);
			clSetKernelArg(st_kernel_Points[0], 4, sizeof(int), &dir);
			clEnqueueNDRangeKernel(command_queues[0], st_kernel_Points[0], 1, NULL, &globalWorkSize_Point, NULL, 0, NULL, NULL);
			clFinish(command_queues[0]);
		}
	ret |= clEnqueueNDRangeKernel(command_queues[0], mem_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
	clFinish(command_queues[0]);
	clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
	clEnqueueReadBuffer(command_queues[0], _var6MemObjEnv_Points[0], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
	clEnqueueWriteBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
	clEnqueueWriteBuffer(command_queues[1], _var6MemObjEnv_Points[1], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
	for (int _var10 = 0; _var10 < num_timesteps; _var10++) {
		ret = clEnqueueNDRangeKernel(command_queues[0], kernel_Point_0s[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
		ret |= clEnqueueNDRangeKernel(command_queues[1], kernel_Point_0s[1], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
		printf("%d %d\n", _var10, ret);
		clFinish(command_queues[1]);
		clFinish(command_queues[0]);
		clEnqueueReadBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point1, 0, NULL, NULL);
		clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
		/* merge the two device copies of each agent in parallel on the host */
		#pragma omp parallel for schedule(static, 128)
		for (int _var11 = 0; _var11 < agents.agents_Point.len; _var11++) {
			Point *_agent = coexecution_merge_Point(&_var9buff_Point0[_var11], &_var9buff_Point1[_var11]);
			_var9buffMerge_Point[_var11] = *_agent;
		}
		clEnqueueWriteBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buffMerge_Point, 0, NULL, NULL);
		ret = clEnqueueNDRangeKernel(command_queues[0], upenv_kernel_Points[0],
1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL); for (int length = 1; length < globalWorkSize_Point; length <<= 1) for (int inc = length; inc > 0; inc >>= 1) { int dir = length << 1; clSetKernelArg(st_kernel_Points[0], 3, sizeof(int), &inc); clSetKernelArg(st_kernel_Points[0], 4, sizeof(int), &dir); clEnqueueNDRangeKernel(command_queues[0], st_kernel_Points[0], 1, NULL, &globalWorkSize_Point, NULL, 0, NULL, NULL); clFinish(command_queues[0]); } ret |= clEnqueueNDRangeKernel(command_queues[0], mem_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL); clFinish(command_queues[0]); clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL); clEnqueueReadBuffer(command_queues[0], _var6MemObjEnv_Points[0], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL); clEnqueueWriteBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL); clEnqueueWriteBuffer(command_queues[1], _var6MemObjEnv_Points[1], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL); } ret = clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, agents.agents_Point.values, 0, NULL, NULL); wtime = omp_get_wtime() - wtime; printf("Time elapsed is %f seconds\n", wtime); return 0; }
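/*
  A minimal CPU sketch of the bitonic sorting network that the host loop
  above drives on the GPU: the outer "length" loop doubles the sorted run
  size, the inner "inc" loop halves the compare distance, and "dir"
  (length << 1) decides via (i & dir) whether element i belongs to an
  ascending or a descending run. The st_kernel in kernel.cl is assumed to
  perform one such compare-exchange stage per launch; this standalone
  version sorts a plain int array of power-of-two length for illustration.
*/
#include <stdio.h>

static void bitonic_sort(int *a, int n) /* n must be a power of two */
{
	for (int length = 1; length < n; length <<= 1)
		for (int inc = length; inc > 0; inc >>= 1) {
			int dir = length << 1;
			for (int i = 0; i < n; i++) {
				int j = i ^ inc; /* compare-exchange partner */
				if (j > i) {
					int ascending = ((i & dir) == 0);
					if ((a[i] > a[j]) == ascending) {
						int t = a[i]; a[i] = a[j]; a[j] = t;
					}
				}
			}
		}
}

int main(void)
{
	int a[8] = { 5, 3, 7, 1, 8, 2, 6, 4 };
	bitonic_sort(a, 8);
	for (int i = 0; i < 8; i++)
		printf("%d ", a[i]); /* prints 1 2 3 4 5 6 7 8 */
	printf("\n");
	return 0;
}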
selectionSimd.c
/*
	Optimized version of the basic Selection Sort algorithm using the
	OpenMP directive #pragma omp for simd

	Build: gcc -o selectionSimd -fopenmp selectionSimd.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void selectionSort(int vet[], int tam);
void exibevetor(int vet[], int tam);

void selectionSort(int vet[], int tam) {
	int i, j, aux, menor;
	/* asks the compiler to vectorize the loop iterations with SIMD
	   instructions; the "for" worksharing part only takes effect inside an
	   enclosing "parallel" region, which this program does not create */
	#pragma omp for simd
	for (i = 0; i < (tam - 1); ++i) {
		menor = i;
		for (j = (i + 1); j < tam; ++j) {
			if (vet[j] < vet[menor]) /* search for the smallest element */
				menor = j;           /* record the new index as the minimum */
		}
		/* swap, moving the smallest element to the front */
		if (vet[i] != vet[menor]) {
			aux = vet[i];
			vet[i] = vet[menor];
			vet[menor] = aux;
		}
	}
}

void exibevetor(int vet[], int tam) {
	int i;
	for (i = 0; i < tam; ++i) {
		printf("%d ", vet[i]);
	}
	printf("\n");
}

int main() {
	int *vet, i, tam;
	clock_t t;
	double cpu_time_used;
	printf("Enter the vector size:\n");
	scanf("%d", &tam);
	vet = (int *)malloc(sizeof(int) * tam);
	if (vet == NULL) {
		exit(1);
	}
	for (i = 0; i < tam; ++i) {
		vet[i] = rand() % 100; /* fill the vector with pseudo-random values */
	}
	exibevetor(vet, tam);
	t = clock();
	selectionSort(vet, tam);
	t = clock() - t;
	exibevetor(vet, tam);
	cpu_time_used = ((double)t) / CLOCKS_PER_SEC;
	printf("\nExecution time: %f\n", cpu_time_used);
	free(vet);
}
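/*
  A sketch of an alternative vectorization point for the file above: the
  outer selection-sort loop carries a dependence (each pass must see the
  swaps made by the previous one), so a safer SIMD target is the inner
  minimum scan. The helper below is hypothetical (not part of the original
  program); it finds the minimum value with an OpenMP simd reduction and
  then recovers its first index with a plain scan. Build with -fopenmp or
  -fopenmp-simd.
*/
#include <stdio.h>

static int argmin_simd(const int v[], int lo, int n)
{
	int minval = v[lo];
	#pragma omp simd reduction(min:minval)
	for (int j = lo + 1; j < n; ++j)
		minval = v[j] < minval ? v[j] : minval;
	for (int j = lo; j < n; ++j) /* recover the index of that value */
		if (v[j] == minval)
			return j;
	return lo; /* unreachable; keeps the compiler happy */
}

int main(void)
{
	int v[] = { 9, 4, 7, 1, 8 };
	printf("argmin = %d\n", argmin_simd(v, 0, 5)); /* prints 3 */
	return 0;
}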
blackberry_ES10_fmt_plug.c
/* Cracker for BlackBerry Enterprise Server 10 hashes. * * Thanks to Nicolas RUFF for providing the algorithm details and sample * hashes! * * USE BDSMgmt; * SELECT LoginPassword FROM EASUsers; * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_blackberry1; #elif FMT_REGISTERS_H john_register_one(&fmt_blackberry1); #else #include <string.h> #include <errno.h> #include "sha2.h" #include "arch.h" //#undef _OPENMP //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> // OMP_SCALE tests (intel core i7) // 8 - 77766 // 64 - 80075 // 128 - 82016 -test=0 is still almost instant. // 256 - 81753 // 512 - 80537 #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #endif #include "memdbg.h" #define FORMAT_TAG "$bbes10$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define FORMAT_LABEL "Blackberry-ES10" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA-512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT " (101x)" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define MAX_SALT_SIZE 64 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests blackberry_tests[] = { {"$bbes10$76BDF6BE760FCF5DEE7B20E27632D1FEDD9D64E1BBCC941F42957E87CBFB96F176324B2E2C71976CEBE67CA6F400F33F001D7453D80F4AF5D80C8A93ED0BA0E6$DB1C19C0", "toulouse"}, {"$bbes10$57ECCAA65BB087E3E506A8C5CEBEE193DD051538CE44F4156D65F1B44E0266DF49337EA11812DF12E39C8B12EB46F19C291FD9529CD4F09B3C8109BE6F4861E5$0wzWUnuQ", "test"}, {"$bbes10$217A6A0646ACF599B5A05A3D2B47F96B576353C74E4D28E857A476EFDFB36B27930FEDAA8064FFD17F36C7C854BED49FF95029B3310434BB2D05524043AE6E44$A5Dr4lXa", "ripper"}, {"$bbes10$DE1A954989FFED2D74900463A1AD7B14D852164D84AA0443F0EC59A0875A911C92CEF73E7C082B13864132644FA49DFEBDCF1D2DA0C9711CD4DC348A855F7285$MnphRIkf", "superbadPass"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; unsigned char salt[MAX_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; char *p; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LENGTH; if (0 < strlen(ctcopy) && '$' == ctcopy[strlen(ctcopy) - 1]) /* Can not end with '$' */ goto err; if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if (!ishexuc(p)) goto 
err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (strlen(p) > MAX_SALT_SIZE) goto err; p = strtokm(NULL, "$"); if (p) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); p = strrchr(ciphertext, '$') + 1; strcpy((char*)cs.salt, p); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; int i; char *p = ciphertext + FORMAT_TAG_LENGTH; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { int j; SHA512_CTX ctx; #ifdef SIMD_COEF_64 unsigned int i; unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[128]; uint64_t *keys64, *tmpBuf64=(uint64_t*)tmpBuf, *p64; keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE); keys64 = (uint64_t*)keys; memset(keys, 0, 128*MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i])); SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt)); SHA512_Final(tmpBuf, &ctx); p64 = &keys64[i%SIMD_COEF_64+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]; for (j = 0; j < 8; ++j) #if ARCH_LITTLE_ENDIAN==1 p64[j*SIMD_COEF_64] = JOHNSWAP64(tmpBuf64[j]); #else p64[j*SIMD_COEF_64] = tmpBuf64[j]; #endif p64[8*SIMD_COEF_64] = 0x8000000000000000ULL; p64[15*SIMD_COEF_64] = 0x200; } for (j = 0; j < 98; j++) SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); SIMDSHA512body(keys, (uint64_t*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT); #else SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt)); SHA512_Final((unsigned char *)crypt_out[index], &ctx); /* now "h" (crypt_out[index] becomes our input * total SHA-512 calls => 101 */ for (j = 0; j < 99; j++) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, (unsigned char*)crypt_out[index], 64); SHA512_Final((unsigned char *)crypt_out[index], &ctx); } #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void blackberry_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_blackberry1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, blackberry_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { 
fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, blackberry_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
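/*
  A standalone reference sketch of the hash computed by the scalar path of
  crypt_all() above: h = SHA512(password . salt), after which the 64-byte
  digest is rehashed 99 more times. It uses OpenSSL's libcrypto instead of
  John's internal sha2.h/SIMD code (the SHA512_* functions are deprecated
  but still available in OpenSSL 3), so it checks correctness, not speed.
  If the scheme is as the scalar path implements it, the output should
  match the first $bbes10$ test hash above. Build with: gcc bbes10.c -lcrypto
*/
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void bbes10_hash(const char *password, const char *salt,
                        unsigned char digest[SHA512_DIGEST_LENGTH])
{
	SHA512_CTX ctx;
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, password, strlen(password));
	SHA512_Update(&ctx, salt, strlen(salt));
	SHA512_Final(digest, &ctx);
	for (int i = 0; i < 99; i++) /* iterate on the raw 64-byte digest */
		SHA512(digest, SHA512_DIGEST_LENGTH, digest);
}

int main(void)
{
	unsigned char digest[SHA512_DIGEST_LENGTH];
	bbes10_hash("toulouse", "DB1C19C0", digest); /* first test vector above */
	for (int i = 0; i < SHA512_DIGEST_LENGTH; i++)
		printf("%02X", digest[i]);
	printf("\n");
	return 0;
}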
ex_particle_OPENMP_seq.cuda.c
#include <stdio.h> __device__ inline int hclib_get_current_worker() { return blockIdx.x * blockDim.x + threadIdx.x; } template<class functor_type> __global__ void wrapper_kernel(unsigned iter_offset, unsigned niters, functor_type functor) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < niters) { functor(iter_offset + tid); } } template<class functor_type> static void kernel_launcher(const char *kernel_lbl, unsigned iter_offset, unsigned niters, functor_type functor) { const int threads_per_block = 256; const int nblocks = (niters + threads_per_block - 1) / threads_per_block; functor.transfer_to_device(); const unsigned long long start = capp_current_time_ns(); wrapper_kernel<<<nblocks, threads_per_block>>>(iter_offset, niters, functor); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error while synchronizing kernel - %s\n", cudaGetErrorString(err)); exit(2); } const unsigned long long end = capp_current_time_ns(); fprintf(stderr, "%s %llu ns\n", kernel_lbl, end - start); functor.transfer_from_device(); } #ifdef __cplusplus #ifdef __CUDACC__ #endif #endif /** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include <limits.h> #include <time.h> #include <string.h> #define PI 3.1415926535897932 /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } // Returns the number of seconds elapsed between the two specified times float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /** * Takes in a double and returns an integer that approximates to that double * @return if the fractional part < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue + 1; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The value to replace testValue with * @param array3D The image vector * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames */ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue; } } } } /** * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a
normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * Sets values of 3D matrix using randomly generated numbers from a normal distribution * @param array3D The video to be modified * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param seed The seed array */ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /** * Fills a radius x radius matrix representing the disk * @param disk The pointer to the disk to be made * @param radius The radius of the disk to be made */ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /** * Dilates the provided video * @param matrix The video to be dilated * @param posX The x location of the pixel to be dilated * @param posY The y location of the pixel to be dilated * @param poxZ The z location of the pixel to be dilated * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param error The error radius */ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /** * Dilates the target matrix using the radius as a guide * @param matrix The reference matrix * @param dimX The x dimension of the video * @param dimY The y dimension of the video * @param dimZ The z dimension of the video * @param error The error radius to be dilated * @param newMatrix The target matrix */ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /** * Fills a 2D array describing the offsets of the disk object * @param se The disk object * @param numOnes The number of ones in the disk * @param neighbors The array that will contain the offsets * @param radius The radius used for dilation */ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int 
diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y]){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /** * The synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the background intensity are known * the image is corrupted with zero mean Gaussian noise * @param I The video itself * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames of the video * @param seed The seed array used for number generation */ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = abs(x0 + (k-1)); yk = abs(y0 - 2*(k-1)); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /** * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 50 * @param I The 3D matrix * @param ind The current ind array * @param numOnes The length of ind array * @return A double representing the sum */ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((I[ind[y]] - 100),2) - pow((I[ind[y]]-228),2))/50.0; return likelihoodSum; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses sequential search * @param CDF The CDF * @param lengthCDF The length of CDF * @param value The value to be found * @return The index of value in the CDF; if value is never found, returns the last index */ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses binary search before switching to sequential search * @param CDF The CDF * @param beginIndex The index to start searching from * @param endIndex The index to stop searching * @param value The value to find * @return The index of value in the CDF; if value is never found, returns the last index * @warning Use at your own risk; not fully tested */ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){ if(endIndex < beginIndex) return -1; int middleIndex = beginIndex + ((endIndex - beginIndex)/2); /*check the value*/ if(CDF[middleIndex] >= value) { /*check that it's good*/ if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(middleIndex > 0 &&
CDF[middleIndex-1] == value) middleIndex--; return middleIndex; } } if(CDF[middleIndex] > value) return findIndexBin(CDF, beginIndex, middleIndex-1, value); return findIndexBin(CDF, middleIndex+1, endIndex, value); } /** * The implementation of the particle filter using OpenMP for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ class pragma373_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; volatile int Nparticles; public: pragma373_omp_parallel_hclib_async(double* set_weights, int set_x, int set_Nparticles) { h_weights = set_weights; x = set_x; Nparticles = set_Nparticles; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = 1/((double)(Nparticles)); } } } }; class pragma388_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile arrayX; double* volatile h_arrayX; int x; volatile double xe; double* volatile arrayY; double* volatile h_arrayY; volatile double ye; public: pragma388_omp_parallel_hclib_async(double* set_arrayX, int set_x, double set_xe, double* set_arrayY, double set_ye) { h_arrayX = set_arrayX; x = set_x; xe = set_xe; h_arrayY = set_arrayY; ye = set_ye; } void transfer_to_device() { int i;
cudaError_t err; arrayX = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 2, h_arrayX, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { arrayX[x] = xe; arrayY[x] = ye; } } } }; class pragma402_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ double randn(int * seed, int index) { { /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } } __device__ double randu(int * seed, int index) { { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } } double* volatile arrayX; double* volatile h_arrayX; int x; volatile int A; volatile int C; volatile long M; int* volatile seed; int* volatile h_seed; double* volatile arrayY; double* volatile h_arrayY; public: pragma402_omp_parallel_hclib_async(double* set_arrayX, int set_x, int set_A, int set_C, long set_M, int* set_seed, double* set_arrayY) { h_arrayX = set_arrayX; x = set_x; A = set_A; C = set_C; M = set_M; h_seed = set_seed; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; seed = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 3, h_arrayX, h_seed, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, 
cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (seed == NULL && (char *)h_seed >= (char *)host_allocations[i] && ((char *)h_seed - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_seed - (char *)host_allocations[i]); memcpy((void *)(&seed), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(seed || h_seed == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } } } }; class pragma410_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ double roundDouble(double value) { { int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue + 1; } } int y; volatile int countOnes; int indX; double* volatile arrayX; double* volatile h_arrayX; int x; double* volatile objxy; double* volatile h_objxy; int indY; double* volatile arrayY; double* volatile h_arrayY; int* volatile ind; int* volatile h_ind; volatile int IszY; volatile int Nfr; volatile int k; volatile int max_size; double* volatile likelihood; double* volatile h_likelihood; int* volatile I; int* volatile h_I; public: pragma410_omp_parallel_hclib_async(int set_y, int set_countOnes, int set_indX, double* set_arrayX, int set_x, double* set_objxy, int set_indY, double* set_arrayY, int* set_ind, int set_IszY, int set_Nfr, int set_k, int set_max_size, double* set_likelihood, int* set_I) { y = set_y; countOnes = set_countOnes; indX = set_indX; h_arrayX = set_arrayX; x = set_x; h_objxy = set_objxy; indY = set_indY; h_arrayY = set_arrayY; h_ind = set_ind; IszY = set_IszY; Nfr = set_Nfr; k = set_k; max_size = set_max_size; h_likelihood = set_likelihood; h_I = set_I; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; objxy = NULL; arrayY = NULL; ind = NULL; likelihood = NULL; I = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 6, h_arrayX, h_objxy, h_arrayY,
h_ind, h_likelihood, h_I); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (objxy == NULL && (char *)h_objxy >= (char *)host_allocations[i] && ((char *)h_objxy - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_objxy - (char *)host_allocations[i]); memcpy((void *)(&objxy), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } if (ind == NULL && (char *)h_ind >= (char *)host_allocations[i] && ((char *)h_ind - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_ind - (char *)host_allocations[i]); memcpy((void *)(&ind), (void *)(&tmp), sizeof(void *)); } if (likelihood == NULL && (char *)h_likelihood >= (char *)host_allocations[i] && ((char *)h_likelihood - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_likelihood - (char *)host_allocations[i]); memcpy((void *)(&likelihood), (void *)(&tmp), sizeof(void *)); } if (I == NULL && (char *)h_I >= (char *)host_allocations[i] && ((char *)h_I - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_I - (char *)host_allocations[i]); memcpy((void *)(&I), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(objxy || h_objxy == NULL); assert(arrayY || h_arrayY == NULL); assert(ind || h_ind == NULL); assert(likelihood || h_likelihood == NULL); assert(I || h_I == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. // Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. 
//calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[x*countOnes + y] = fabs((double)(indX*IszY*Nfr + indY*Nfr + k)); if(ind[x*countOnes + y] >= max_size) ind[x*countOnes + y] = 0; } likelihood[x] = 0; for(y = 0; y < countOnes; y++) likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0; likelihood[x] = likelihood[x]/((double) countOnes); } } } }; class pragma433_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; double* volatile likelihood; double* volatile h_likelihood; public: pragma433_omp_parallel_hclib_async(double* set_weights, int set_x, double* set_likelihood) { h_weights = set_weights; x = set_x; h_likelihood = set_likelihood; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; likelihood = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 2, h_weights, h_likelihood); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } if (likelihood == NULL && (char *)h_likelihood >= (char *)host_allocations[i] && ((char *)h_likelihood - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_likelihood - (char *)host_allocations[i]); memcpy((void *)(&likelihood), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); assert(likelihood || h_likelihood == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = weights[x] * exp(likelihood[x]); } } } }; class pragma440_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double sumWeights; double* volatile weights; double* volatile h_weights; int x; public: pragma440_omp_parallel_hclib_async(double set_sumWeights, double* set_weights, int set_x) { sumWeights = set_sumWeights; h_weights = set_weights; x = set_x; } void transfer_to_device() { int i; cudaError_t 
err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { sumWeights += weights[x]; } } } }; class pragma446_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; volatile double sumWeights; public: pragma446_omp_parallel_hclib_async(double* set_weights, int set_x, double set_sumWeights) { h_weights = set_weights; x = set_x; sumWeights = set_sumWeights; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = 
cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = weights[x]/sumWeights; } } } }; class pragma455_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double xe; double* volatile arrayX; double* volatile h_arrayX; int x; double* volatile weights; double* volatile h_weights; double ye; double* volatile arrayY; double* volatile h_arrayY; public: pragma455_omp_parallel_hclib_async(double set_xe, double* set_arrayX, int set_x, double* set_weights, double set_ye, double* set_arrayY) { xe = set_xe; h_arrayX = set_arrayX; x = set_x; h_weights = set_weights; ye = set_ye; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; weights = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 3, h_arrayX, h_weights, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(weights || h_weights == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } } } }; class pragma480_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned 
nallocations; void **device_allocations; double* volatile u; double* volatile h_u; int x; volatile double u1; volatile int Nparticles; public: pragma480_omp_parallel_hclib_async(double* set_u, int set_x, double set_u1, int set_Nparticles) { h_u = set_u; x = set_x; u1 = set_u1; Nparticles = set_Nparticles; } void transfer_to_device() { int i; cudaError_t err; u = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_u); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (u == NULL && (char *)h_u >= (char *)host_allocations[i] && ((char *)h_u - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_u - (char *)host_allocations[i]); memcpy((void *)(&u), (void *)(&tmp), sizeof(void *)); } } assert(u || h_u == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { u[x] = u1 + x/((double)(Nparticles)); } } } }; class pragma488_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ int findIndex(double * CDF, int lengthCDF, double value) { { int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } } int i; double* volatile CDF; double* volatile h_CDF; volatile int Nparticles; double* volatile u; double* volatile h_u; int j; double* volatile xj; double* volatile h_xj; double* volatile arrayX; double* volatile h_arrayX; double* volatile yj; double* volatile h_yj; double* volatile arrayY; double* volatile h_arrayY; public: pragma488_omp_parallel_hclib_async(int set_i, double* set_CDF, int set_Nparticles, double* set_u, int set_j, double* set_xj, double* set_arrayX, double* set_yj, double* set_arrayY) { i = set_i; h_CDF = set_CDF; Nparticles = set_Nparticles; h_u = set_u; j = set_j; h_xj = set_xj; h_arrayX = set_arrayX; h_yj = set_yj; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; CDF = NULL; u = NULL; xj = NULL; arrayX = NULL; yj = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 6, h_CDF, h_u, h_xj, h_arrayX, h_yj, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA 
Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (CDF == NULL && (char *)h_CDF >= (char *)host_allocations[i] && ((char *)h_CDF - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_CDF - (char *)host_allocations[i]); memcpy((void *)(&CDF), (void *)(&tmp), sizeof(void *)); } if (u == NULL && (char *)h_u >= (char *)host_allocations[i] && ((char *)h_u - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_u - (char *)host_allocations[i]); memcpy((void *)(&u), (void *)(&tmp), sizeof(void *)); } if (xj == NULL && (char *)h_xj >= (char *)host_allocations[i] && ((char *)h_xj - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_xj - (char *)host_allocations[i]); memcpy((void *)(&xj), (void *)(&tmp), sizeof(void *)); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (yj == NULL && (char *)h_yj >= (char *)host_allocations[i] && ((char *)h_yj - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_yj - (char *)host_allocations[i]); memcpy((void *)(&yj), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(CDF || h_CDF == NULL); assert(u || h_u == NULL); assert(xj || h_xj == NULL); assert(arrayX || h_arrayX == NULL); assert(yj || h_yj == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int j) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { i = findIndex(CDF, Nparticles, u[j]); if(i == -1) i = Nparticles-1; xj[j] = arrayX[i]; yj[j] = arrayY[i]; } } } }; void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + 
y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma373_omp_parallel", iters_offset, niters, pragma373_omp_parallel_hclib_async(weights, x, Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTS TOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); double * u = (double *)malloc(sizeof(double)*Nparticles); int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma388_omp_parallel", iters_offset, niters, pragma388_omp_parallel_hclib_async(arrayX, x, xe, arrayY, ye)); } int k; printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //apply motion model //draws sample from motion model (random walk). The only prior information //is that the object moves 2x as fast in the y direction as in the x direction { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma402_omp_parallel", iters_offset, niters, pragma402_omp_parallel_hclib_async(arrayX, x, A, C, M, seed, arrayY)); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); //particle filter likelihood { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma410_omp_parallel", iters_offset, niters, pragma410_omp_parallel_hclib_async(y, countOnes, indX, arrayX, x, objxy, indY, arrayY, ind, IszY, Nfr, k, max_size, likelihood, I)); } long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma433_omp_parallel", iters_offset, niters, pragma433_omp_parallel_hclib_async(weights, x, likelihood)); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma440_omp_parallel", iters_offset, niters, pragma440_omp_parallel_hclib_async(sumWeights, weights, x)); } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma446_omp_parallel", iters_offset, niters, pragma446_omp_parallel_hclib_async(weights, x, sumWeights)); } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected
values { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma455_omp_parallel", iters_offset, niters, pragma455_omp_parallel_hclib_async(xe, arrayX, x, weights, ye, arrayY)); } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma480_omp_parallel", iters_offset, niters, pragma480_omp_parallel_hclib_async(u, x, u1, Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); int j, i; { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma488_omp_parallel", iters_offset, niters, pragma488_omp_parallel_hclib_async(i, CDF, Nparticles, u, j, xj, arrayX, yj, arrayY)); } long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); //#pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } free(disk); free(objxy); free(weights); free(likelihood); free(xj); free(yj); free(arrayX); free(arrayY); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); printf("VIDEO SEQUENCE TOOK %f\n", 
elapsed_time(start, endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); free(seed); free(I); return 0; }
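The device-side resampling in pragma480/pragma488 above is classic systematic resampling: one uniform draw u1 in [0, 1/N), stratified points u[j] = u1 + j/N, and a CDF lookup per particle. Below is a host-only C sketch of that step, with rand()/RAND_MAX standing in for the benchmark's LCG randu(); all data values and names are illustrative.

/* Host-only sketch of the resampling step that pragma480/pragma488
 * implement on the device: systematic resampling from the cumulative
 * weight distribution.  Standalone C; build with: cc resample.c */
#include <stdio.h>
#include <stdlib.h>

static int find_index(const double *CDF, int n, double value) {
    int x;
    for (x = 0; x < n; x++)
        if (CDF[x] >= value) return x;
    return n - 1;               /* same fallback as findIndex() above */
}

int main(void) {
    enum { N = 8 };
    double w[N] = { .05, .05, .4, .1, .1, .1, .1, .1 };   /* normalized weights */
    double CDF[N], xj[N], x[N] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /* particle states */
    int j;

    CDF[0] = w[0];
    for (j = 1; j < N; j++) CDF[j] = CDF[j-1] + w[j];     /* cumulative sum */

    double u1 = ((double)rand() / RAND_MAX) / N;          /* one draw shared by all */
    for (j = 0; j < N; j++) {
        double u = u1 + (double)j / N;                    /* stratified point */
        xj[j] = x[find_index(CDF, N, u)];                 /* resampled state */
    }

    for (j = 0; j < N; j++) printf("%g ", xj[j]);         /* heavy particle repeats */
    putchar('\n');
    return 0;
}

Because every u[j] shares the same offset u1, a particle carrying weight w is duplicated roughly w*N times, which is exactly what the end-of-frame loop that copies xj/yj back into arrayX/arrayY and resets the weights to 1/N relies on.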
GB_unop__identity_fc32_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fc32_fp64 // op(A') function: GB_unop_tran__identity_fc32_fp64 // C type: GxB_FC32_t // A type: double // cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fc32_fp64 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fc32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
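Stripped of the GrB machinery, the generated apply kernel above is an elementwise typecast loop: each double is narrowed to float and promoted to a complex number with zero imaginary part, after which the identity op is a no-op. A behaviorally equivalent plain-C sketch using C99 <complex.h> in place of the GxB_FC32_t typedef (which GraphBLAS typically defines as float complex on C99 compilers); note CMPLXF may require a C11 libc, with (float)Ax[p] + 0.0f*I as a portable fallback.

/* Plain-C sketch of what GB_unop_apply__identity_fc32_fp64 computes:
 * Cx[p] = GxB_CMPLXF((float) Ax[p], 0), i.e. cast double -> float complex. */
#include <stdio.h>
#include <complex.h>

int main(void) {
    double        Ax[3] = { 1.5, -2.25, 3.0 };
    float complex Cx[3];
    int p;

    for (p = 0; p < 3; p++)
        Cx[p] = CMPLXF((float)Ax[p], 0.0f);   /* identity op after the typecast */

    for (p = 0; p < 3; p++)
        printf("(%g, %g)\n", crealf(Cx[p]), cimagf(Cx[p]));
    return 0;
}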
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. 
These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
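  ///
  /// Illustrative use, mirroring the common pattern in the parser:
  ///
  ///   TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
  ///   ++CurTemplateDepthTracker;  // one level for the parameter list just
  ///                               // parsed; restored on scope exit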
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio,
                        P.ParenCount, P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding.  All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.
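
  // An illustrative driver for the entry points below (a sketch of what
  // clang::ParseAST does; 'Consumer' stands in for an ASTConsumer and is not
  // part of this interface):
  //
  //   Parser P(PP, Actions, /*SkipFunctionBodies=*/false);
  //   P.Initialize();
  //   Parser::DeclGroupPtrTy ADecl;
  //   for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
  //        AtEOF = P.ParseTopLevelDecl(ADecl))
  //     if (ADecl)
  //       Consumer->HandleTopLevelDecl(ADecl.get());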
/// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. 
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;       // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched a specific position in the grammar, provide code-completion
  /// results based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
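  /// For example, the '+' tokens in 'return (args + ... + 0);' are valid fold
  /// operators.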
bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? 
nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && Tok.getIdentifierInfo() != Ident_Bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
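  /// For example, it returns true at 'instancetype' in '- (instancetype)init;'.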
bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. 
/// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. enum class CompoundToken { /// A '(' '{' beginning a statement-expression. StmtExprBegin, /// A '}' ')' ending a statement-expression. StmtExprEnd, /// A '[' '[' beginning a C++11 or C2x attribute. AttrBegin, /// A ']' ']' ending a C++11 or C2x attribute. AttrEnd, /// A '::' '*' forming a C++ pointer-to-member declaration. MemberPtr, }; /// Check that a compound operator was written in a "sensible" way, and warn /// if not. void checkCompoundToken(SourceLocation FirstTokLoc, tok::TokenKind FirstTokKind, CompoundToken Op); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// Introduces zero or more scopes for parsing. The scopes will all be exited /// when the object is destroyed. 
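  ///
  /// An illustrative use (a sketch, not taken from the implementation):
  ///
  ///   MultiParseScope Scopes(*this);
  ///   Scopes.Enter(Scope::DeclScope);
  ///   Scopes.Enter(Scope::TemplateParamScope);
  ///   ...  // both scopes are exited on Exit() or destruction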
  class MultiParseScope {
    Parser &Self;
    unsigned NumScopes = 0;

    MultiParseScope(const MultiParseScope&) = delete;

  public:
    MultiParseScope(Parser &Self) : Self(Self) {}
    void Enter(unsigned ScopeFlags) {
      Self.EnterScope(ScopeFlags);
      ++NumScopes;
    }
    void Exit() {
      while (NumScopes) {
        Self.ExitScope();
        --NumScopes;
      }
    }
    ~MultiParseScope() {
      Exit();
    }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

  /// Re-enter the template scopes for a declaration that might be a template.
  unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character. Balances (), [], and {} delimiter tokens while
  /// skipping.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If no MisleadingIndentationChecker is
  /// active for an else, this location is invalid.
  SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.
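  //
  // Conceptually, late parsing defers regions that may refer to members
  // declared later in the class. An illustrative C++ input (not parser code):
  //
  //   struct S {
  //     int f(int x = count());   // default-argument tokens are cached
  //     int count() { return n; } // body tokens are cached
  //     int n;
  //   };
  //
  // The cached token streams are replayed only after the closing '}', when S
  // is complete, so 'count' and 'n' resolve correctly.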
  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(Decl *P,
                                       std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, its method declarations that contain parts that won't be
  /// parsed until after the definition is completed (C++ [class.mem]p2),
  /// the method declarations and possibly attached inline definitions
  /// will be stored here with the tokens that will be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };
  /// The stack of classes that is currently being parsed.
  /// Nested and local classes will be pushed onto this stack when they are
  /// parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed prior
  /// to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr),
                           TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  // In ParseCXXInlineMethods.cpp.
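  // An illustrative replay sequence once a top-level class is complete (the
  // precise sequencing is an implementation detail of
  // ParseCXXInlineMethods.cpp):
  //
  //   ParseLexedPragmas(Class);
  //   ParseLexedAttributes(Class);
  //   ParseLexedMethodDeclarations(Class); // default args, exception specs
  //   ParseLexedMemberInitializers(Class);
  //   ParseLexedMethodDefs(Class);         // inline member function bodies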
struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
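  // (For example, the simple-asm in 'int x asm("sym_x");' is 'asm("sym_x")',
  // and EndLoc would point at the closing parenthesis.)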
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
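  /// For example, given the base type 'NSArray', this parses the
  /// '<NSString *>' suffix of 'NSArray<NSString *>' and produces the
  /// specialized type.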
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. 
enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseSYCLUniqueStableNameExpression(); ExprResult ParseSYCLUniqueStableIdExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. 
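  /// For example: '__builtin_bit_cast(unsigned, 1.0f)'.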
ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr, bool EnterForConditionScope = false); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); struct DesignatorCompletionInfo { SmallVectorImpl<Expr *> &InitExprs; QualType PreferredBaseType; }; ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 SourceLocation *LParenLoc = nullptr,
                                 SourceLocation *RParenLoc = nullptr);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;
    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;
    /// The name we're looking for.
    UnqualifiedId Name;
    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. 
NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);
  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and that it isn't part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or we're
  /// not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks if the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult TryParseParameterDeclarationClause(
      bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear on certain syntactic locations
  // which the standard permits but we don't support yet, for example,
  // attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); enum ParseAttrKindMask { PAKM_GNU = 1 << 0, PAKM_Declspec = 1 << 1, PAKM_CXX11 = 1 << 2, }; /// \brief Parse attributes based on what syntaxes are desired, allowing for /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec: /// __attribute__((...)) __declspec(...) __attribute__((...))) /// Note that Microsoft attributes (spelled with single square brackets) are /// not supported by this because of parsing ambiguities with other /// constructs. /// /// There are some attribute parse orderings that should not be allowed in /// arbitrary order. e.g., /// /// [[]] __attribute__(()) int i; // OK /// __attribute__(()) [[]] int i; // Not OK /// /// Such situations should use the specific attribute parsing functionality. void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr); void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); } /// \brief Possibly parse attributes based on what syntaxes are desired, /// allowing for the order to vary. bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } /// Parses GNU-style attributes and returns them without source range /// information. /// /// This API is discouraged. Use the version that takes a /// ParsedAttributesWithRange instead. 
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(Attrs, EndLoc, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); return true; } return false; } bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParseGNUAttributes(Attrs, EndLoc, LateAttrs); return true; } return false; } /// Parses GNU-style attributes and returns them without source range /// information. /// /// This API is discouraged. Use the version that takes a /// ParsedAttributesWithRange instead. void ParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D); Attrs.takeAllFrom(AttrsWithRange); } void ParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) { ParseCXX11Attributes(attrs, endLoc); return true; } return false; } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) { ParseMicrosoftDeclSpecs(Attrs, End); return true; } return false; } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); 
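  // (Editorial sketch, not part of the original header; the inputs below are
  // hypothetical.) ParseAlignmentSpecifier above is the entry point for
  // alignment specifiers, e.g.:
  //
  //   alignas(16) int buf[64];     // C++11 alignment-specifier
  //   _Alignas(16) int buf2[64];   // C11 spelling of the same construct
  //
  // In both cases the alignment is recorded as an attribute in the
  // ParsedAttributes list rather than as a dedicated DeclSpec field.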
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; bool isClassCompatibleKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. 
This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributesWithRange &Attrs, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. 
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse 'omp [begin] assume[s]' directive. void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parse 'omp end assumes' directive. void ParseOpenMPEndAssumesDirective(SourceLocation Loc); /// Parse clauses for '#pragma omp [begin] declare target'. void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind, OpenMPDirectiveKind EndDKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. 
/// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); /// Parses clause with an interop variable of kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. // OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); 
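  // (Editorial sketch, not part of the original header; the example is
  // illustrative only.) ParseGreaterThanInTemplateList above is where
  // consecutive '>' tokens are split, so that, e.g.,
  //
  //   std::vector<std::vector<int>> v;   // '>>' treated as '>' '>' in C++11
  //
  // closes both template-argument-lists instead of being lexed as a
  // right-shift operator.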
  //===--------------------------------------------------------------------===//
  // Modules
  DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
  Decl *ParseModuleImport(SourceLocation AtLoc);
  bool parseMisplacedModuleImport();
  bool tryParseMisplacedModuleImport() {
    tok::TokenKind Kind = Tok.getKind();
    if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
        Kind == tok::annot_module_include)
      return parseMisplacedModuleImport();
    return false;
  }

  bool ParseModuleName(
      SourceLocation UseLoc,
      SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
      bool IsImport);

  //===--------------------------------------------------------------------===//
  // C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
  ExprResult ParseTypeTrait();

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  /// SYCL Type Traits
  // __builtin_num_fields, __builtin_num_bases
  ExprResult ParseSYCLBuiltinNum();
  // __builtin_field_type, __builtin_base_type
  ExprResult ParseSYCLBuiltinType();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
  void CodeCompleteNaturalLanguage() override;

  class GNUAsmQualifiers {
    unsigned Qualifiers = AQ_unspecified;

  public:
    enum AQ {
      AQ_unspecified = 0,
      AQ_volatile = 1,
      AQ_inline = 2,
      AQ_goto = 4,
    };
    static const char *getQualifierName(AQ Qualifier);
    bool setAsmQualifier(AQ Qualifier);
    inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
    inline bool isInline() const { return Qualifiers & AQ_inline; }
    inline bool isGoto() const { return Qualifiers & AQ_goto; }
  };
  bool isGCCAsmStatement(const Token &TokAfterAsm) const;
  bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
  GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
  bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};

} // end namespace clang

#endif
GB_binop__bclr_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint32) // C=scalar+B GB (_bind1st__bclr_uint32) // C=scalar+B' GB (_bind1st_tran__bclr_uint32) // C=A+scalar GB (_bind2nd__bclr_uint32) // C=A'+scalar GB (_bind2nd_tran__bclr_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, uint32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bclr_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bclr_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bclr_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, uint32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, uint32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ; \ } GrB_Info GB (_bind1st_tran__bclr_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bclr_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
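//------------------------------------------------------------------------------
// note: what the BCLR operator computes
//------------------------------------------------------------------------------

// The authoritative definition is the GB_BITCLR macro in GB.h. As a minimal
// sketch (assuming 0-based bit positions reduced into the 32-bit word), the
// operator used throughout this file behaves like:
//
//      static inline uint32_t bclr_uint32 (uint32_t x, uint32_t y)
//      {
//          // clear bit y of x
//          return (x & ~(((uint32_t) 1) << (y % 32))) ;
//      }
//
// so, for instance, clearing bit 3 of 0xFF yields 0xF7.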
completion.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "completion.h" #include "../io.h" #include "../util.h" #include <math.h> #include <omp.h> /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ /** * @brief Predict a value for a three-way tensor. * * @param model The model to use for the prediction. * @param test The test tensor which gives us the model rows. * @param index The nonzero to draw the row indices from. test[0][index] ... * * @return The predicted value. */ static inline val_t p_predict_val3( tc_model const * const model, sptensor_t const * const test, idx_t const index) { val_t est = 0; idx_t const nfactors = model->rank; assert(test->nmodes == 3); idx_t const i = test->ind[0][index]; idx_t const j = test->ind[1][index]; idx_t const k = test->ind[2][index]; val_t const * const restrict A = model->factors[0] + (i * nfactors); val_t const * const restrict B = model->factors[1] + (j * nfactors); val_t const * const restrict C = model->factors[2] + (k * nfactors); for(idx_t f=0; f < nfactors; ++f) { est += A[f] * B[f] * C[f]; } return est; } /** * @brief Predict a value for a three-way tensor when the model uses column-major * matrices. * * @param model The column-major model to use for the prediction. * @param test The test tensor which gives us the model rows. * @param index The nonzero to draw the row indices from. test[0][index] ... * * @return The predicted value. */ static inline val_t p_predict_val3_col( tc_model const * const model, sptensor_t const * const test, idx_t const index) { val_t est = 0; idx_t const nfactors = model->rank; assert(test->nmodes == 3); idx_t const i = test->ind[0][index]; idx_t const j = test->ind[1][index]; idx_t const k = test->ind[2][index]; idx_t const I = model->dims[0]; idx_t const J = model->dims[1]; idx_t const K = model->dims[2]; val_t const * const restrict A = model->factors[0]; val_t const * const restrict B = model->factors[1]; val_t const * const restrict C = model->factors[2]; for(idx_t f=0; f < nfactors; ++f) { est += A[i+(f*I)] * B[j+(f*J)] * C[k+(f*K)]; } return est; } /** * @brief Print some basic statistics about factorization progress. * * @param epoch Which epoch we are on. * @param loss The sum-of-squared loss. * @param rmse_tr The RMSE on the training set. * @param rmse_vl The RMSE on the validation set. * @param ws Workspace, used for timing information. 
*/ static void p_print_progress( idx_t const epoch, val_t const loss, val_t const rmse_tr, val_t const rmse_vl, tc_ws const * const ws) { printf("%"SPLATT_PF_IDX"\t%0.5e\t%0.5e\t%0.5e\t%0.3f\n", epoch, loss, rmse_tr, rmse_vl, ws->tc_time.seconds); // printf("epoch:%4"SPLATT_PF_IDX" loss: %0.5e " // "RMSE-tr: %0.5e RMSE-vl: %0.5e time: %0.3fs\n", // epoch, loss, rmse_tr, rmse_vl, ws->tc_time.seconds); } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ val_t tc_rmse( sptensor_t const * const test, tc_model const * const model, tc_ws * const ws) { #ifdef SPLATT_USE_MPI val_t loss = tc_loss_sq(test, model, ws); /* TODO: this portion is untimed, so recompute it each time for now */ idx_t global_nnz = test->nnz; MPI_Allreduce(MPI_IN_PLACE, &global_nnz, 1, SPLATT_MPI_IDX, MPI_SUM, ws->rinfo->comm_3d); return sqrt(loss / global_nnz); #else return sqrt(tc_loss_sq(test, model, ws) / test->nnz); #endif } val_t tc_mae( sptensor_t const * const test, tc_model const * const model, tc_ws * const ws) { val_t loss_obj = 0.; val_t const * const restrict test_vals = test->vals; #pragma omp parallel reduction(+:loss_obj) { val_t * buffer = ws->thds[splatt_omp_get_thread_num()].scratch[0]; if(model->which == SPLATT_TC_CCD) { #pragma omp for schedule(static) for(idx_t x=0; x < test->nnz; ++x) { val_t const predicted = tc_predict_val_col(model, test, x, buffer); loss_obj += fabs(test_vals[x] - predicted); } } else { #pragma omp for schedule(static) for(idx_t x=0; x < test->nnz; ++x) { val_t const predicted = tc_predict_val(model, test, x, buffer); loss_obj += fabs(test_vals[x] - predicted); } } } #ifdef SPLATT_USE_MPI MPI_Allreduce(MPI_IN_PLACE, &loss_obj, 1, SPLATT_MPI_VAL, MPI_SUM, ws->rinfo->comm_3d); idx_t nnz = test->nnz; MPI_Allreduce(MPI_IN_PLACE, &nnz, 1, SPLATT_MPI_IDX, MPI_SUM, ws->rinfo->comm_3d); return loss_obj / nnz; #else return loss_obj / test->nnz; #endif } val_t tc_loss_sq( sptensor_t const * const test, tc_model const * const model, tc_ws * const ws) { val_t loss_obj = 0.; val_t const * const restrict test_vals = test->vals; #pragma omp parallel reduction(+:loss_obj) { val_t * buffer = ws->thds[splatt_omp_get_thread_num()].scratch[0]; if(model->which == SPLATT_TC_CCD) { #pragma omp for schedule(static) for(idx_t x=0; x < test->nnz; ++x) { val_t const err = test_vals[x] - tc_predict_val_col(model, test, x, buffer); loss_obj += err * err; } } else { #pragma omp for schedule(static) for(idx_t x=0; x < test->nnz; ++x) { val_t const err = test_vals[x] - tc_predict_val(model, test, x, buffer); loss_obj += err * err; } } } #ifdef SPLATT_USE_MPI MPI_Allreduce(MPI_IN_PLACE, &loss_obj, 1, SPLATT_MPI_VAL, MPI_SUM, ws->rinfo->comm_3d); #endif return loss_obj; } val_t tc_frob_sq( tc_model const * const model, tc_ws const * const ws) { idx_t const nfactors = model->rank; val_t reg_obj = 0.; #pragma omp parallel reduction(+:reg_obj) { for(idx_t m=0; m < model->nmodes; ++m) { val_t accum = 0; #ifdef SPLATT_USE_MPI idx_t const nrows = model->globmats[m]->I; val_t const * const restrict mat = model->globmats[m]->vals; #else idx_t const nrows = model->dims[m]; val_t const * const restrict mat = model->factors[m]; #endif #pragma omp for schedule(static) nowait for(idx_t x=0; x < nrows * nfactors; ++x) { accum += mat[x] * mat[x]; } reg_obj += ws->regularization[m] * accum; } } /* end omp parallel */ #ifdef SPLATT_USE_MPI MPI_Allreduce(MPI_IN_PLACE, &reg_obj, 1, SPLATT_MPI_VAL, MPI_SUM, 
ws->rinfo->comm_3d); #endif assert(reg_obj > 0); return reg_obj; } val_t tc_predict_val( tc_model const * const model, sptensor_t const * const test, idx_t const index, val_t * const restrict buffer) { if(test->nmodes == 3) { return p_predict_val3(model, test, index); } idx_t const nfactors = model->rank; /* initialize accumulation of each latent factor with the first row */ idx_t const row_id = test->ind[0][index]; val_t const * const init_row = model->factors[0] + (row_id * nfactors); for(idx_t f=0; f < nfactors; ++f) { buffer[f] = init_row[f]; } /* now multiply each factor by A(i,:), B(j,:) ... */ idx_t const nmodes = model->nmodes; for(idx_t m=1; m < nmodes; ++m) { idx_t const row_id = test->ind[m][index]; val_t const * const row = model->factors[m] + (row_id * nfactors); for(idx_t f=0; f < nfactors; ++f) { buffer[f] *= row[f]; } } /* finally, sum the factors to form the final estimated value */ val_t est = 0; for(idx_t f=0; f < nfactors; ++f) { est += buffer[f]; } return est; } val_t tc_predict_val_col( tc_model const * const model, sptensor_t const * const test, idx_t const index, val_t * const restrict buffer) { if(test->nmodes == 3) { return p_predict_val3_col(model, test, index); } idx_t const nfactors = model->rank; /* initialize accumulation of each latent factor with the first row */ idx_t const row_id = test->ind[0][index]; val_t const * const init_row = model->factors[0] + (row_id * nfactors); for(idx_t f=0; f < nfactors; ++f) { buffer[f] = model->factors[0][row_id + (f * model->dims[0])]; } /* now multiply each factor by A(i,:), B(j,:) ... */ idx_t const nmodes = model->nmodes; for(idx_t m=1; m < nmodes; ++m) { idx_t const row_id = test->ind[m][index]; for(idx_t f=0; f < nfactors; ++f) { buffer[f] *= model->factors[m][row_id + (f * model->dims[m])]; } } /* finally, sum the factors to form the final estimated value */ val_t est = 0; for(idx_t f=0; f < nfactors; ++f) { est += buffer[f]; } return est; } /****************************************************************************** * WORKSPACE FUNCTIONS *****************************************************************************/ tc_model * tc_model_alloc( sptensor_t const * const train, idx_t const rank, splatt_tc_type const which) { tc_model * model = splatt_malloc(sizeof(*model)); model->which = which; model->rank = rank; model->nmodes = train->nmodes; for(idx_t m=0; m < train->nmodes; ++m) { model->dims[m] = train->dims[m]; size_t const bytes = model->dims[m] * rank * sizeof(**(model->factors)); model->factors[m] = splatt_malloc(bytes); fill_rand(model->factors[m], model->dims[m] * rank); } return model; } tc_model * tc_model_copy( tc_model const * const model) { tc_model * ret = splatt_malloc(sizeof(*model)); ret->which = model->which; ret->rank = model->rank; ret->nmodes = model->nmodes; for(idx_t m=0; m < model->nmodes; ++m) { ret->dims[m] = model->dims[m]; idx_t const bytes = model->dims[m] * model->rank * sizeof(**(model->factors)); ret->factors[m] = splatt_malloc(bytes); par_memcpy(ret->factors[m], model->factors[m], bytes); } return ret; } void tc_model_free( tc_model * model) { for(idx_t m=0; m < model->nmodes; ++m) { splatt_free(model->factors[m]); #ifdef SPLATT_USE_MPI mat_free(model->globmats[m]); #endif } splatt_free(model); } tc_ws * tc_ws_alloc( sptensor_t const * const train, tc_model const * const model, idx_t nthreads) { tc_ws * ws = splatt_malloc(sizeof(*ws)); idx_t const nmodes = model->nmodes; ws->nmodes = nmodes; /* some reasonable defaults */ ws->learn_rate = 0.001; idx_t const rank = model->rank; 
  ws->maxdense_dim = 0;
  ws->num_dense = 0;

  /* Set mode-specific parameters, etc. */
  for(idx_t m=0; m < nmodes; ++m) {
    /* dense modes */
    ws->isdense[m] = train->dims[m] < DENSEMODE_THRESHOLD;
    if(ws->isdense[m]) {
      ws->maxdense_dim = SS_MAX(ws->maxdense_dim, train->dims[m]);
      ++(ws->num_dense);
    }

    switch(model->which) {
    case SPLATT_TC_GD:
      ws->regularization[m] = 1e-2;
      break;
    case SPLATT_TC_NLCG:
      ws->regularization[m] = 1e-2;
      break;
    case SPLATT_TC_LBFGS:
      ws->regularization[m] = 1e-2;
      break;
    case SPLATT_TC_SGD:
      ws->regularization[m] = 5e-3;
      break;
    case SPLATT_TC_CCD:
      ws->regularization[m] = 1e2;
      break;
    case SPLATT_TC_ALS:
    case SPLATT_TC_SPALS:
    case SPLATT_TC_RRALS:
      ws->regularization[m] = 1e2;
      break;
    case SPLATT_TC_NALGS:
      break;
    }
  }

  /* reset timers */
  timer_reset(&ws->grad_time);
  timer_reset(&ws->line_time);
  timer_reset(&ws->tc_time);

  /* size of largest mode (for CCD) */
  idx_t const max_dim = train->dims[argmax_elem(train->dims, train->nmodes)];

  /* Allocate general structures */
  ws->numerator = NULL;
  ws->denominator = NULL;
  ws->nthreads = nthreads;
  switch(model->which) {
  case SPLATT_TC_GD:
    ws->thds = thd_init(nthreads, 1, rank * sizeof(val_t));
    break;
  case SPLATT_TC_LBFGS:
    ws->thds = thd_init(nthreads, 1, rank * sizeof(val_t));
    break;
  case SPLATT_TC_NLCG:
    ws->thds = thd_init(nthreads, 1, rank * sizeof(val_t));
    break;
  case SPLATT_TC_SGD:
    ws->thds = thd_init(nthreads, 1, rank * sizeof(val_t));
    break;
  case SPLATT_TC_CCD:
    ws->numerator = splatt_malloc(max_dim * sizeof(*ws->numerator));
    ws->denominator = splatt_malloc(max_dim * sizeof(*ws->denominator));
    ws->thds = thd_init(nthreads, 1, rank * sizeof(val_t));
    break;
  case SPLATT_TC_ALS:
  case SPLATT_TC_SPALS:
  case SPLATT_TC_RRALS:
    ws->thds = thd_init(nthreads, 5,
        rank * sizeof(val_t),               /* prediction buffer */
        nmodes * rank * sizeof(val_t),      /* MTTKRP buffer */
        rank * rank * sizeof(val_t),        /* normal equations */
        rank * ALS_BUFSIZE * sizeof(val_t), /* pre-normal equations buffer */
        nmodes * rank * sizeof(val_t));     /* accum for pre-normal equations */
    break;
  case SPLATT_TC_NALGS:
    ws->thds = NULL;
    fprintf(stderr, "SPLATT: completion algorithm not recognized.\n");
    break;
  }

  /* convergence */
  ws->max_its = 1000;
  ws->num_inner = 1;
  ws->max_seconds = -1;
  ws->max_badepochs = 20;
  ws->nbadepochs = 0;
  ws->best_epoch = 0;
  ws->best_rmse = SPLATT_VAL_MAX;
  ws->tolerance = 1e-4;
  ws->best_model = tc_model_copy(model);

#ifdef SPLATT_USE_MPI
  ws->nbr2globs_buf = NULL;
  ws->local2nbr_buf = NULL;
#endif

  return ws;
}


void tc_ws_free(
    tc_ws * ws)
{
  thd_free(ws->thds, ws->nthreads);
  splatt_free(ws->numerator);
  splatt_free(ws->denominator);
#ifdef SPLATT_USE_MPI
  splatt_free(ws->nbr2globs_buf);
  splatt_free(ws->local2nbr_buf);
  rank_free(*(ws->rinfo), ws->best_model->nmodes);
#endif
  tc_model_free(ws->best_model);
  splatt_free(ws);
}


bool tc_converge(
    sptensor_t const * const train,
    sptensor_t const * const validate,
    tc_model const * const model,
    val_t const loss,
    val_t const frobsq,
    idx_t const epoch,
    tc_ws * const ws)
{
#ifdef SPLATT_USE_MPI
  val_t const train_rmse = sqrt(loss / ws->rinfo->global_nnz);
#else
  val_t const train_rmse = sqrt(loss / train->nnz);
#endif

  timer_stop(&ws->tc_time);
  val_t converge_rmse = train_rmse;

  /* optionally compute validation */
  val_t val_rmse = 0.;
  if(validate != NULL) {
    val_rmse = tc_rmse(validate, model, ws);
    /* base convergence on validation */
    converge_rmse = val_rmse;
  }

#ifdef SPLATT_USE_MPI
  if(ws->rinfo->rank == 0) {
    p_print_progress(epoch, loss, train_rmse, val_rmse, ws);
  }
#else
  p_print_progress(epoch, loss, train_rmse, val_rmse, ws);
#endif

  bool converged =
false; if(converge_rmse - ws->best_rmse < -(ws->tolerance)) { ws->nbadepochs = 0; ws->best_rmse = converge_rmse; ws->best_epoch = epoch; /* save the best model */ for(idx_t m=0; m < model->nmodes; ++m) { par_memcpy(ws->best_model->factors[m], model->factors[m], model->dims[m] * model->rank * sizeof(**(model->factors))); /* TODO copy globmats too */ } } else { ++ws->nbadepochs; if(ws->nbadepochs == ws->max_badepochs) { converged = true; } } /* check for time limit */ if(ws->max_seconds > 0 && ws->tc_time.seconds >= ws->max_seconds) { converged = true; } if(!converged) { timer_start(&ws->tc_time); } return converged; } /****************************************************************************** * MPI FUNCTIONS *****************************************************************************/ #ifdef SPLATT_USE_MPI int mpi_tc_distribute_coarse( char const * const train_fname, char const * const validate_fname, idx_t const * const dims, sptensor_t * * train_out, sptensor_t * * validate_out, rank_info * const rinfo) { rinfo->decomp = SPLATT_DECOMP_COARSE; for(idx_t m=0; m < MAX_NMODES; ++m) { rinfo->dims_3d[m] = 1; } /* distribute training tensor with a coarse-grained decomposition */ sptensor_t * train = mpi_tt_read(train_fname, NULL, rinfo); if(train == NULL) { return SPLATT_ERROR_BADINPUT; } *train_out = train; *validate_out = NULL; if(validate_fname != NULL) { /* simple distribution for validate tensor */ sptensor_t * val_tmp = mpi_simple_distribute(validate_fname, MPI_COMM_WORLD); if(val_tmp == NULL) { tt_free(train); *train_out = NULL; return SPLATT_ERROR_BADINPUT; } /* now rearrange */ idx_t const bigmode = argmax_elem(train->dims, train->nmodes); idx_t * coarse_parts = calloc(rinfo->npes+1, sizeof(*coarse_parts)); coarse_parts[rinfo->rank] = rinfo->mat_start[bigmode]; MPI_Allreduce(MPI_IN_PLACE, coarse_parts, rinfo->npes, SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d); coarse_parts[rinfo->npes] = train->dims[bigmode]; /* redistribute along largest mode */ int * parts = splatt_malloc(val_tmp->nnz * sizeof(*parts)); idx_t const * const bigind = val_tmp->ind[bigmode]; #pragma omp parallel for schedule(static) for(idx_t n=0; n < val_tmp->nnz; ++n) { for(int p=0; p < rinfo->npes; ++p) { if(coarse_parts[p+1] > bigind[n]) { parts[n] = p; break; } } } free(coarse_parts); /* rearrange validation nonzeros */ sptensor_t * validate = mpi_rearrange_by_part(val_tmp, parts,rinfo->comm_3d); *validate_out = validate; tt_free(val_tmp); splatt_free(parts); } return SPLATT_SUCCESS; } int mpi_tc_distribute_med( char const * const train_fname, char const * const validate_fname, idx_t const * const dims, sptensor_t * * train_out, sptensor_t * * validate_out, rank_info * const rinfo) { /* defaults if not otherwise specified */ if(dims == NULL) { rinfo->decomp = DEFAULT_MPI_DISTRIBUTION; for(idx_t m=0; m < MAX_NMODES; ++m) { rinfo->dims_3d[m] = 1; } } else { rinfo->decomp = SPLATT_DECOMP_MEDIUM; memcpy(rinfo->dims_3d, dims, MAX_NMODES * sizeof(*(rinfo->dims_3d))); } /* distribute training tensor with a medium-grained decomposition */ sptensor_t * train = mpi_tt_read(train_fname, NULL, rinfo); *train_out = train; if(train == NULL) { return SPLATT_ERROR_BADINPUT; } rinfo->decomp = SPLATT_DECOMP_MEDIUM; *validate_out = NULL; if(validate_fname != NULL) { /* simple distribution for validate tensor...we will redistribute to match * training distribution */ sptensor_t * val_tmp = mpi_simple_distribute(validate_fname, rinfo->comm_3d); if(val_tmp == NULL) { tt_free(train); *train_out = NULL; return SPLATT_ERROR_BADINPUT; } int 
* parts = splatt_malloc(val_tmp->nnz * sizeof(*parts)); #pragma omp parallel for schedule(static) for(idx_t n=0; n < val_tmp->nnz; ++n) { parts[n] = mpi_determine_med_owner(val_tmp, n, rinfo); } /* rearrange validation nonzeros */ sptensor_t * validate = mpi_rearrange_by_part(val_tmp, parts,rinfo->comm_3d); *validate_out = validate; tt_free(val_tmp); splatt_free(parts); /* now map validation indices to layer coordinates and fill in dims */ #pragma omp parallel for(idx_t m=0; m < validate->nmodes; ++m) { #pragma omp master validate->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m]; #pragma omp for schedule(static) nowait for(idx_t n=0; n < validate->nnz; ++n) { assert(validate->ind[m][n] >= rinfo->layer_starts[m]); assert(validate->ind[m][n] < rinfo->layer_ends[m]); validate->ind[m][n] -= rinfo->layer_starts[m]; } } } return SPLATT_SUCCESS; } tc_model * mpi_tc_model_alloc( sptensor_t const * const train, idx_t const rank, splatt_tc_type const which, permutation_t const * const perm, rank_info * const rinfo) { tc_model * model = splatt_malloc(sizeof(*model)); model->which = which; model->rank = rank; model->nmodes = train->nmodes; for(idx_t m=0; m < train->nmodes; ++m) { /* just allocate local factors */ model->dims[m] = train->dims[m]; model->factors[m] = splatt_malloc(model->dims[m] * rank * sizeof(**model->factors)); /* use mpi_mat_rand() to get consistent initializations across runs with * different numbers of MPI ranks */ model->globmats[m] = mpi_mat_rand(m, rank, perm, rinfo); } return model; } #endif
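/* Note on the estimates computed above: with rank-R factor matrices A, B, C,
 * the CP prediction for a three-way nonzero (i,j,k) is
 *
 *     est(i,j,k) = sum_{f=1}^{R} A(i,f) * B(j,f) * C(k,f),
 *
 * which p_predict_val3() evaluates with row-major factors and
 * p_predict_val3_col() with column-major factors; tc_rmse() then reports
 * sqrt( sum_x (val_x - est_x)^2 / nnz ) over the test nonzeros.
 */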
morn_image.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com> Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "morn_image.h" struct HandleImageCreate { MImage *img; MChain *property; int64_t reserve[8]; int writeable; int cn; int height; int width; unsigned char **index; MMemory *memory; unsigned char **backup_index; MMemory *backup_memory; unsigned char **backup_data[MORN_MAX_IMAGE_CN]; int image_type; int border_type; }; void endImageCreate(struct HandleImageCreate *handle) { mException((handle->img == NULL),EXIT,"invalid image"); if(handle->property!=NULL) mChainRelease(handle->property); if(handle->index !=NULL) mFree(handle->index); if(handle->memory !=NULL) mMemoryRelease(handle->memory); if(handle->backup_index !=NULL) mFree(handle->backup_index); if(handle->backup_memory!=NULL) mMemoryRelease(handle->backup_memory); memset(handle->img,0,sizeof(MImage)); // mFree(((MList **)(handle->img))-1); } #define HASH_ImageCreate 0xccb34f86 MImage *ImageCreate(int cn,int height,int width,unsigned char **data[]) { if(cn <0) {cn = 0;} if(height <0) {height = 0;} if(width <0) {width = 0;} mException((cn>MORN_MAX_IMAGE_CN),EXIT,"invalid input"); MImage *img = (MImage *)ObjectAlloc(sizeof(MImage)); img->height = height;img->width = width;img->channel = cn; MHandle *hdl=mHandle(img,ImageCreate); struct HandleImageCreate *handle = (struct HandleImageCreate *)(hdl->handle); handle->img = img; mPropertyVariate(img,"image_type",&(handle->image_type)); if(cn==1) handle->image_type=MORN_IMAGE_GRAY; else if(cn==3) handle->image_type=MORN_IMAGE_RGB ; else if(cn==4) handle->image_type=MORN_IMAGE_RGBA; else handle->image_type=DFLT; mPropertyVariate(img,"border_type",&(handle->border_type)); handle->border_type=DFLT; if((cn==0)||(height == 0)) { mException((!INVALID_POINTER(data)),EXIT,"invalid input"); return img; } if(!INVALID_POINTER(data)) { memcpy(img->data,data,cn*sizeof(unsigned char **)); handle->border_type = MORN_BORDER_INVALID; return img; } int col = width + 32; int row = height+ 16; handle->index = (unsigned char **)mMalloc(row*cn*sizeof(unsigned char *)); handle->height = height; handle->cn = cn; if(width==0) { mException(!INVALID_POINTER(data),EXIT,"invalid input"); memset(handle->index,0,row*cn*sizeof(unsigned char *)); handle->border_type = MORN_BORDER_INVALID; return img; } if(handle->memory == NULL) handle->memory = mMemoryCreate(1,cn*row*col*sizeof(unsigned char),MORN_HOST); mException(handle->memory->num!=1,EXIT,"invalid image memory"); mMemoryIndex(handle->memory,cn*row,col*sizeof(unsigned char),(void ***)(&(handle->index)),1); handle->width = width; mPropertyFunction(img,"device",mornMemoryDevice,handle->memory); for(int i=0;i<cn*row;i++) handle->index[i] = &(handle->index[i][16]); for(int k=0;k<cn;k++) img->data[k] = handle->index + k*row+8; handle->border_type = MORN_BORDER_UNDEFINED; return img; } void ImageRedefine(MImage *img,int cn,int height,int width,unsigned char **data[]) { mException((INVALID_POINTER(img)),EXIT,"invalid input"); if(cn<=0) cn =img->channel; if(height <= 0) height=img->height; if(width<=0) width 
=img->width;
    if((cn!=img->channel)||(height!=img->height)||(width!=img->width)) mHandleReset(img);

    int same_size = ((cn<=img->channel)&&(height<=img->height)&&(width<=img->width));
    int reuse = (data==img->data);
    int flag = (img->channel&&img->height&&img->width);
    int channel_changed = (cn!=img->channel);  /* recorded before img->channel is overwritten below */
    mException((cn>MORN_MAX_IMAGE_CN),EXIT,"invalid input channel");
    img->channel = cn;
    img->height = height;
    img->width = width;
    if(same_size&&reuse) return;

    struct HandleImageCreate *handle = (struct HandleImageCreate *)(ObjHandle(img,0)->handle);
    if(channel_changed)
    {
             if(cn==1) handle->image_type=MORN_IMAGE_GRAY;
        else if(cn==3) handle->image_type=MORN_IMAGE_RGB;
        else if(cn==4) handle->image_type=MORN_IMAGE_RGBA;
    }
    if(same_size&&INVALID_POINTER(data)&&(handle->width >0)) return;

    // mException(reuse&&flag&&(handle->width==0),EXIT,"invalid redefine");
    handle->width = 0;
    img->border = NULL;
    handle->border_type = MORN_BORDER_UNDEFINED;

    if((height<=0)||(cn<=0)||(width<=0))
    {
        mException((!INVALID_POINTER(data))&&(!reuse),EXIT,"invalid input");
        memset(img->data,0,MORN_MAX_IMAGE_CN*sizeof(unsigned char*));
        return;
    }
    if(reuse) data=NULL;

    int col = width + 32;
    int row = height+ 16;
    if(height*cn>handle->height*handle->cn)
    {
        if(handle->index!=NULL) mFree(handle->index);
        handle->index = NULL;
    }
    if(handle->index == NULL)
    {
        handle->index = (unsigned char **)mMalloc(row*cn*sizeof(unsigned char *));
        handle->cn = cn;
        handle->height = height;
    }

    if(!INVALID_POINTER(data))
    {
        memcpy(img->data,data,cn*sizeof(unsigned char **));
        if(data == handle->backup_data)
        {
            if(!INVALID_POINTER(handle->backup_index )) mFree(handle->backup_index);
            if(!INVALID_POINTER(handle->backup_memory)) mMemoryRelease(handle->backup_memory);
            handle->backup_index=NULL;
            handle->backup_memory=NULL;
        }
        else handle->border_type = MORN_BORDER_INVALID;
        return;
    }

    if(handle->memory == NULL)
    {
        handle->memory = mMemoryCreate(1,cn*row*col*sizeof(unsigned char),MORN_HOST);
        mPropertyFunction(img,"device",mornMemoryDevice,handle->memory);
    }
    else mMemoryRedefine(handle->memory,1,cn*row*col*sizeof(unsigned char),DFLT);
    mException(handle->memory->num!=1,EXIT,"invalid image memory");
    mMemoryIndex(handle->memory,cn*row,col*sizeof(unsigned char),(void ***)(&(handle->index)),1);
    for(int i=0;i<cn*row;i++) handle->index[i] = &(handle->index[i][16]);
    handle->width = width;
    for(int k=0;k<cn;k++) img->data[k] = handle->index + k*row+8;
}

void mImageRelease(MImage *img)
{
    ObjectFree(img);
}

unsigned char ***mImageBackup(MImage *img,int cn,int height,int width)
{
    if(cn    <=0) cn    =img->channel;
    if(height<=0) height=img->height;
    if(width <=0) width =img->width;
    int col = width + 32;
    int row = height+ 16;

    struct HandleImageCreate *handle = (struct HandleImageCreate *)(ObjHandle(img,0)->handle);
    if(handle->backup_index!=NULL) mFree(handle->backup_index);
    handle->backup_index = (unsigned char **)mMalloc(cn*row*sizeof(unsigned char *));
    if(handle->backup_memory == NULL) handle->backup_memory = mMemoryCreate(1,cn*row*col*sizeof(unsigned char),MORN_HOST);
    else mMemoryRedefine(handle->backup_memory,1,cn*row*col*sizeof(unsigned char),DFLT);
    mException(handle->backup_memory->num!=1,EXIT,"invalid image backup memory");
    mMemoryIndex(handle->backup_memory,cn*row,col*sizeof(unsigned char),(void ***)(&(handle->backup_index)),1);
    for(int i=0;i<cn*row;i++) handle->backup_index[i] = &(handle->backup_index[i][16]);
    for(int k=0;k<cn;k++) handle->backup_data[k] = handle->backup_index + k*row+8;
    return (handle->backup_data);
}

void mImageExpand(MImage *img,int r,int border_type)
{
    mException((r>8)||(border_type <
MORN_BORDER_BLACK)||(border_type > MORN_BORDER_REFLECT),EXIT,"invalid border type"); int *img_border_type = (int *)mPropertyRead(img,"border_type"); mException((*img_border_type == MORN_BORDER_INVALID),EXIT,"image expand is invalid"); #define EXPEND_LEFT(Y,X1) {\ if(border_type == MORN_BORDER_BLACK)\ memset(img->data[cn][Y]+X1-r,0,r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_WHITE)\ memset(img->data[cn][Y]+X1-r,255,r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REPLICATE)\ memset(img->data[cn][Y]+X1-r,img->data[cn][Y][X1],r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REFLECT)\ for(int i=1;i<=r;i++) img->data[cn][Y][X1-i]=img->data[cn][Y][X1+i];\ } #define EXPEND_RIGHT(Y,X2) {\ if(border_type == MORN_BORDER_BLACK)\ memset(img->data[cn][Y]+X2+1,0,r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_WHITE)\ memset(img->data[cn][Y]+X2+1,255,r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REPLICATE)\ memset(img->data[cn][Y]+X2+1,img->data[cn][Y][X2],r*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REFLECT)\ for(int i=1;i<=r;i++) img->data[cn][Y][X2+i]=img->data[cn][Y][X2-i];\ } #define EXPEND_TOP(Y,Y1,X1,X2) {\ if(border_type == MORN_BORDER_BLACK)\ memset(img->data[cn][Y]+X1-r,0,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_WHITE)\ memset(img->data[cn][Y]+X1-r,255,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REPLICATE)\ memcpy(img->data[cn][Y]+X1-r,img->data[cn][Y1]+X1-r,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REFLECT)\ memcpy(img->data[cn][Y]+X1-r,img->data[cn][Y1+Y1-Y]+X1-r,(X2-X1+r+r)*sizeof(unsigned char));\ } #define EXPEND_BUTTOM(Y,Y2,X1,X2) {\ if(border_type == MORN_BORDER_BLACK)\ memset(img->data[cn][Y]+X1-r,0,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_WHITE)\ memset(img->data[cn][Y]+X1-r,255,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REPLICATE)\ memcpy(img->data[cn][Y]+X1-r,img->data[cn][Y2]+X1-r,(X2-X1+r+r)*sizeof(unsigned char));\ else if(border_type == MORN_BORDER_REFLECT)\ memcpy(img->data[cn][Y]+X1-r,img->data[cn][Y2+Y2-Y]+X1-r,(X2-X1+r+r)*sizeof(unsigned char));\ } int height = img->height; int width = img->width; int y1 = ImageY1(img); int y2 = ImageY2(img)-1; if(*img_border_type == MORN_BORDER_IMAGE) { for(int cn=0;cn<img->channel;cn++) { int j; for(j=MAX(y1-r,0);j<y1;j++) {int x1 = ImageX1(img,y1); if(x1<r) EXPEND_LEFT(j,0); int x2 =ImageX2(img,y1); if(x2>width-r) EXPEND_RIGHT(j,width);} #pragma omp parallel for for(j=y1;j<=y2;j++) {int x1 = ImageX1(img,j ); if(x1<r) EXPEND_LEFT(j,0); int x2 =ImageX2(img,j ); if(x2>width-r) EXPEND_RIGHT(j,width);} for(j=y2+1;j<MIN(y2+r,height);j++) {int x1 = ImageX1(img,y2); if(x1<r) EXPEND_LEFT(j,0); int x2 =ImageX2(img,y2); if(x2>width-r) EXPEND_RIGHT(j,width);} int x1,x2; x1 = ImageX1(img,y1); x2 = ImageX2(img,y1); for(j=y1-r;j<0;j++) EXPEND_TOP(j, 0,x1,x2); x1 = ImageX1(img,y2); x2 = ImageX2(img,y2); for(j=height;j<=y2+r;j++) EXPEND_BUTTOM(j,(height-1),x1,x2); } return; } for(int cn=0;cn<img->channel;cn++) { int j; #pragma omp parallel for for(j=y1;j<=y2;j++) { int x1 = ImageX1(img,j); int x2 = ImageX2(img,j)-1; EXPEND_LEFT(j,x1); EXPEND_RIGHT(j,x2); } int x1,x2; x1 = ImageX1(img,y1); x2 = ImageX2(img,y1)-1; for(j=y1-r;j<y1;j++) EXPEND_TOP(j,y1,x1,x2); x1 = ImageX1(img,y2); x2 = ImageX2(img,y2)-1; for(j=y2+1;j<=y2+r;j++) EXPEND_BUTTOM(j,y2,x1,x2); } mPropertyWrite(img,"border_type",&border_type,sizeof(int)); } void 
m_ImageCut(MImage *img,MImage *dst,MImageRect *rect,MImagePoint *locate)
{
    mException(INVALID_IMAGE(img)||INVALID_POINTER(rect),EXIT,"invalid input");
    int x1=rect->x1;int x2=rect->x2;int y1=rect->y1;int y2=rect->y2;
    int lx,ly;if(INVALID_POINTER(locate)) {lx=0,ly=0;}else {lx=locate->x;ly=locate->y;}
    int height = ABS(y1-y2);int width = ABS(x1-x2);
    mException((height==0)||(width<=0),EXIT,"invalid input");

    if(lx<0) {x1=(x1<x2)?(x1-lx):(x1+lx); lx=0;} else if(lx>0) {x2=(x1<x2)?(x2-lx):(x2+lx);}
    if(ly<0) {y1=(y1<y2)?(y1-ly):(y1+ly); ly=0;} else if(ly>0) {y2=(y1<y2)?(y2-ly):(y2+ly);}

    if((y1<0)&&(y2<0)) {return;}
    if((y1>=img->height)&&(y2>=img->height)) {return;}
    if((x1<0)&&(x2<0)) {return;}
    if((x1>=img->width )&&(x2>=img->width )) {return;}

    if(y1<0) {ly=ly-y1; y1=0;} else if(y2<0) {ly=ly-y2; y2=0;}
    if(x1<0) {lx=lx-x1; x1=0;} else if(x2<0) {lx=lx-x2; x2=0;}
    y1=MIN(y1,img->height);y2=MIN(y2,img->height);
    x1=MIN(x1,img->width );x2=MIN(x2,img->width );

    int h = ABS(y1-y2);int w = ABS(x1-x2);
    if((h==0)||(w==0)) return;
    // printf("x1=%d,x2=%d,width=%d,w=%d\n",x1,x2,width,w);

    if(INVALID_POINTER(dst)) dst=img;
    unsigned char ***dst_data;
    if(dst!=img)
    {
        mImageRedefine(dst,img->channel,height,width);
        dst_data=dst->data;
    }
    else
    {
        if(( ly   *img->width+lx   <= y1*img->width+x1)&&( ly   *img->width+lx+w <= y1*img->width+x2)
         &&((ly+h)*img->width+lx   <= y2*img->width+x1)&&((ly+h)*img->width+lx+w <= y1*img->width+x2))
            dst_data=img->data;
        else
            dst_data=mImageBackup(img,DFLT,height,width);
    }
    // printf("x1 is %d,x2 is %d,y1 is %d,y2 is %d,lx=%d,ly=%d,height=%d,width=%d,h=%d,w=%d\n",x1,x2,y1,y2,lx,ly,height,width,h,w);

    if(x1<x2)
    {
        for(int c=0;c<img->channel;c++)
            for(int j=ly,y=y1;j<ly+h;j++,y+=((y2>y1)?1:-1))
                memcpy(dst->data[c][j]+lx,img->data[c][y]+x1,w*sizeof(unsigned char));
    }
    else
    {
        for(int c=0;c<img->channel;c++)
            for(int j=ly,y=y1;j<ly+h;j++,y+=((y2>y1)?1:-1))
                for(int i=lx,x=x1;i<lx+w;i++,x--)
                    dst->data[c][j][i]=img->data[c][y][x];
    }
    if(img==dst) mImageRedefine(dst,DFLT,height,width,dst_data);
}

// void mImageCut(MImage *img,MImage *ROI,int x1,int x2,int y1,int y2,int lx,int ly)
// {
//     mException(INVALID_IMAGE(img),EXIT,"invalid input");
//     int flag = (x1<0)||(x2>=img->width)||(x1>=x2)||(y1<0)||(y2>=img->height)||(y1>=y2)||((img==ROI)&&(ly<y1));
//     if(flag){ImageCut(img,ROI,x1,x2,y1,y2,lx,ly);return;}
//     int height=y2-y1;int width =x2-x1;
//     if(INVALID_POINTER(ROI)) ROI=img;
//     else mImageRedefine(ROI,img->channel,height,width);
//     if(lx<0) {x1=x1-lx;lx=0;} else if(lx>0) {x2=x2-lx;}
//     if(ly<0) {y1=y1-ly;ly=0;} else if(ly>0) {y2=y2-ly;}
//     // printf("x1 is %d,x2 is %d,y1 is %d,y2 is %d\n",x1,x2,y1,y2);
//     for(int j=y1;j<y2;j++)for(int cn=0;cn<ROI->channel;cn++)
//         memmove(ROI->data[cn][ly+j-y1]+lx,img->data[cn][j]+x1,MIN((ROI->width-lx),(x2-x1))*sizeof(unsigned char));
// }

void mImageCopy(MImage *src,MImage *dst)
{
    int i;
    if(src==dst) return;
    mException((INVALID_IMAGE(src)||INVALID_POINTER(dst)),EXIT,"invalid input");
    mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    dst->border = src->border;
    // dst->info = src->info;
    for(int cn=0;cn<src->channel;cn++)
    {
        #pragma omp parallel for
        for(i=0;i<src->height;i++)
            memcpy(dst->data[cn][i],src->data[cn][i],src->width*sizeof(unsigned char));
    }
}

// void m_ImageWipe(MImage *img,int channel)
// {
//     char f[MORN_MAX_IMAGE_CN];
//     if(channel<0) {memset(f,1,MORN_MAX_IMAGE_CN*sizeof(char));}
//     else {mException(channel>=img->channel,EXIT,"invalid input");memset(f,0,MORN_MAX_IMAGE_CN*sizeof(char));f[channel]=1;}
//     for(int c=0;c<img->channel;c++)
//     {
//         if(f[c]==0) continue;
//         for(int j=0;j<img->height;j++)
//             memset(img->data[c][j],0,img->width*sizeof(unsigned char));
//     }
// }

void m_ImageWipe(MImage *img,int channel,MImageRect *rect)
{
    char f[MORN_MAX_IMAGE_CN];
    if(channel<0) {memset(f,1,MORN_MAX_IMAGE_CN*sizeof(char));}
    else {mException(channel>=img->channel,EXIT,"invalid input");memset(f,0,MORN_MAX_IMAGE_CN*sizeof(char));f[channel]=1;}

    int x,y,width,height;
    if(rect==NULL) {x=0;y=0;width=img->width;height=img->height;}
    else
    {
        x=MIN(rect->x1,rect->x2);y=MIN(rect->y1,rect->y2);width=ABS(rect->x2-rect->x1);height=ABS(rect->y2-rect->y1);
        if(x>=img->width ) {return;} if(x<0) {width =width +x;x=0;}
        if(y>=img->height) {return;} if(y<0) {height=height+y;y=0;}
        if(x+width >img->width ) width =img->width -x;  /* clamp against the image edge, not just the rect size */
        if(y+height>img->height) height=img->height-y;
        if((width<=0)||(height<=0)) {return;}           /* rect lies entirely outside the image */
    }
    for(int c=0;c<img->channel;c++)
    {
        if(f[c]==0) continue;
        for(int j=y;j<y+height;j++)
            memset(img->data[c][j]+x,0,width*sizeof(unsigned char));
    }
}

void mImageDiff(MImage *src1,MImage *src2,MImage *diff)
{
    mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input");
    mException(((src1->channel!=src2->channel)||(src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input");

    if(INVALID_POINTER(diff)) diff = src1;
    else mImageRedefine(diff,src1->channel,src1->height,src1->width,diff->data);
    if(!INVALID_POINTER(src1->border)) diff->border = src1->border;
    else if(!INVALID_POINTER(src2->border)) diff->border = src2->border;

    for(int k=0;k<diff->channel;k++)
    {
        int j;
        #pragma omp parallel for
        for(j=ImageY1(diff);j<ImageY2(diff);j++)
            for(int i=ImageX1(diff,j);i<ImageX2(diff,j);i++)
                diff->data[k][j][i] = ABS(src1->data[k][j][i]-src2->data[k][j][i]);
    }
}

void mImageThreshold(MImage *src,MImage *dst,int *thresh,int value1,int value2)
{
    mException((INVALID_IMAGE(src)),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst=src;
    if(value1<0) value1=0;
    if(value2<0) value2=255;
    for(int j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        int c;for(c=0;c<src->channel;c++) if(src->data[c][j][i]<thresh[c]) break;
        dst->data[0][j][i]=(c==src->channel)?value2:value1;
    }
    dst->channel = 1;
}

void mImageDiffThreshold(MImage *src1,MImage *src2,MImage *diff,int thresh)
{
    mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input");
    mException(((src1->channel!=src2->channel)||(src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input");

    if(INVALID_POINTER(diff)) diff = src1;
    else mImageRedefine(diff,1,src1->height,src1->width,diff->data);
    if(!INVALID_POINTER(src1->border)) diff->border = src1->border;
    else if(!INVALID_POINTER(src2->border)) diff->border = src2->border;

    int j;
    #pragma omp parallel for
    for(j=ImageY1(diff);j<ImageY2(diff);j++)
        for(int i=ImageX1(diff,j);i<ImageX2(diff,j);i++)
        {
            diff->data[0][j][i] = 0;
            for(int k=0;k<src1->channel;k++)
            {
                if(ABS(src1->data[k][j][i]-src2->data[k][j][i])>thresh)
                {
                    diff->data[0][j][i] = 255;
                    break;
                }
            }
        }
    diff->channel = 1;
}

void mImageDataAdd(MImage *src1,MImage *src2,MImage *dst)
{
    mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input");
    mException(((src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input");
    mException((src2->channel!=src1->channel)&&(src2->channel!=1),EXIT,"invalid input");

    if(INVALID_POINTER(dst)) {dst = src1;}
    else {mImageRedefine(dst,src1->channel,src1->height,src1->width,dst->data);}
    if(!INVALID_POINTER(src1->border)) dst->border = src1->border;
    else if(!INVALID_POINTER(src2->border)) dst->border = src2->border;

    unsigned char
**data1,**data2; for(int k=0;k<dst->channel;k++) { data1 = src1->data[k]; data2 = (k>=src2->channel)?src2->data[0]:src2->data[k]; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { register int data = data1[j][i]+data2[j][i]; dst->data[k][j][i] = (data>255)?255:data; } } } void mImageDataWeightAdd(MImage *src1,MImage *src2,MImage *dst,float w1,float w2) { mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input"); mException(((src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input"); mException((src2->channel!=src1->channel)&&(src2->channel!=1),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src1;} else {mImageRedefine(dst,src1->channel,src1->height,src1->width,dst->data);} if(!INVALID_POINTER(src1->border)) dst->border = src1->border; else if(!INVALID_POINTER(src2->border)) dst->border = src2->border; int v1[256];int v2[256]; for(int i=0;i<256;i++){v1[i]=(int)((float)i*w1); v2[i]=(int)((float)i*w2);} unsigned char **data1,**data2; for(int k=0;k<dst->channel;k++) { data1 = src1->data[k]; data2 = (k>=src2->channel)?src2->data[0]:src2->data[k]; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { register int data = v1[data1[j][i]]+v2[data2[j][i]]; dst->data[k][j][i] = (data>255)?255:((data<0)?0:data); } } } void mImageDataSub(MImage *src1,MImage *src2,MImage *dst) { mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input"); mException(((src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input"); mException((src2->channel!=src1->channel)&&(src2->channel!=1),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src1;} else {mImageRedefine(dst,src1->channel,src1->height,src1->width,dst->data);} if(!INVALID_POINTER(src1->border)) dst->border = src1->border; else if(!INVALID_POINTER(src2->border)) dst->border = src2->border; unsigned char **data1,**data2; for(int k=0;k<dst->channel;k++) { data1 = src1->data[k]; data2 = (k>=src2->channel)?src2->data[0]:src2->data[k]; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { register int data = data1[j][i]-data2[j][i]; dst->data[k][j][i] = (data<0)?0:data; } } } void mImageDataAnd(MImage *src1,MImage *src2,MImage *dst) { mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input"); mException(((src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input"); mException((src2->channel!=src1->channel)&&(src2->channel!=1),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src1;} if(dst!=src1) {mImageRedefine(dst,src1->channel,src1->height,src1->width,dst->data);} if(!INVALID_POINTER(src1->border)) dst->border = src1->border; else if(!INVALID_POINTER(src2->border)) dst->border = src2->border; unsigned char **data1,**data2; for(int k=0;k<dst->channel;k++) { data1 = src1->data[k]; data2 = (k>=src2->channel)?src2->data[0]:src2->data[k]; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) dst->data[k][j][i] = data1[j][i]&data2[j][i]; } } void mImageDataOr(MImage *src1,MImage *src2,MImage *dst) { mException((INVALID_IMAGE(src1)||INVALID_IMAGE(src2)),EXIT,"invalid input"); mException(((src1->height != src2->height)||(src1->width != src2->width)),EXIT,"invalid input"); mException((src2->channel!=src1->channel)&&(src2->channel!=1),EXIT,"invalid input"); 
if(INVALID_POINTER(dst)) {dst = src1;} else {mImageRedefine(dst,src1->channel,src1->height,src1->width,dst->data);} if(!INVALID_POINTER(src1->border)) dst->border = src1->border; else if(!INVALID_POINTER(src2->border)) dst->border = src2->border; unsigned char **data1,**data2; for(int k=0;k<dst->channel;k++) { data1 = src1->data[k]; data2 = (k>=src2->channel)?src2->data[0]:src2->data[k]; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) dst->data[k][j][i] = data1[j][i]|data2[j][i]; } } void mImageInvert(MImage *src,MImage *dst) { mException((INVALID_IMAGE(src)),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src;} else {mImageRedefine(dst,src->channel,src->height,src->width,dst->data);} if(!INVALID_POINTER(src->border)) dst->border = src->border; for(int k=0;k<dst->channel;k++) { int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) dst->data[k][j][i] = 255-src->data[k][j][i]; } dst->border = src->border; } void mImageLinearMap(MImage *src,MImage *dst,float k,float b) { mException((INVALID_IMAGE(src)),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src;} else {mImageRedefine(dst,src->channel,src->height,src->width,dst->data);} if(!INVALID_POINTER(src->border)) dst->border = src->border; unsigned char data[256]; for(int i=0;i<256;i++) { float rst = ((float)i)*k+b; if(rst>255.0f) data[i] = 255; else if(rst<0.0f) data[i] = 0; else data[i] = (unsigned char)(rst+0.5f); } for(int cn=0;cn<dst->channel;cn++) { int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) dst->data[cn][j][i] = data[src->data[cn][j][i]]; } dst->border = src->border; } void mImageOperate(MImage *src,MImage *dst,void (*func)(unsigned char *,unsigned char *,void *),void *para) { mException((INVALID_IMAGE(src)),EXIT,"invalid input"); if(INVALID_POINTER(dst)) {dst = src;} else { mException((dst->channel<=0),EXIT,"invalid input dst channel"); mImageRedefine(dst,dst->channel,src->height,src->width,dst->data); } if(!INVALID_POINTER(src->border)) dst->border = src->border; unsigned char idata[MORN_MAX_IMAGE_CN]; unsigned char odata[MORN_MAX_IMAGE_CN]; int j; // #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { for(int cn=0;cn<src->channel;cn++) idata[cn]=src->data[cn][j][i]; func(idata,odata,para); for(int cn=0;cn<dst->channel;cn++) dst->data[cn][j][i]=odata[cn]; } dst->border = src->border; } struct HandleImageChannelSplit { int flag[64]; MImage *img[64]; int n; }; void endImageChannelSplit(struct HandleImageChannelSplit *handle) { for(int i=0;i<64;i++) { if(handle->img[i]!=NULL) mImageRelease(handle->img[i]); } } #define HASH_ImageChannelSplit 0x41e09e5b MImage *mImageChannelSplit(MImage *src,int num,...) 
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input image");
    mException((num>=4),EXIT,"invalid input");
    MHandle *hdl=mHandle(src,ImageChannelSplit);
    struct HandleImageChannelSplit *handle = (struct HandleImageChannelSplit *)(hdl->handle);
    hdl->valid = 1;

    int cn[4]={0,0,0,0};
    unsigned char **data[4];
    va_list para;
    va_start(para,num);
    for(int i=0;i<num;i++)
    {
        cn[i]=va_arg(para,int);
        mException((cn[i]<0)||(cn[i]>=src->channel),EXIT,"invalid input channel");
        data[i]=src->data[cn[i]];   // attach the selected source channel planes to the split image
    }
    va_end(para);

    int flag = ((cn[0]+1)<<24)+((cn[1]+1)<<16)+((cn[2]+1)<<8)+(cn[3]+1);
    for(int i=0;i<handle->n;i++)
        if(flag==handle->flag[i]) return handle->img[i];

    int n=handle->n;
    handle->img [n]=mImageCreate(num,src->height,src->width,data);
    handle->flag[n]=flag;
    handle->n=n+1;
    return handle->img[n];
}
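/* Usage sketch (not part of the original file): cross-fading two frames
   with mImageDataWeightAdd above. imgA and imgB are assumed to be valid,
   equally sized images created elsewhere; the result is clamped to
   [0,255] through the precomputed v1/v2 lookup tables. */
void mImageCrossFadeExample(MImage *imgA,MImage *imgB,MImage *out,float t)
{
    // out = (1-t)*imgA + t*imgB, with t in [0,1]
    mImageDataWeightAdd(imgA,imgB,out,1.0f-t,t);
}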
cpi_omp.c
#include <stdio.h>
#include <sys/time.h>
#include <omp.h>

static long nSteps = 1000000000;
static double PI25DT = 3.141592653589793238462643;

double getTime(void)
{
    struct timeval tv;
    struct timezone tz;
    gettimeofday(&tv, &tz);
    return tv.tv_sec + 1e-6*(double)tv.tv_usec;
}

int main(int argc, char** argv)
{
    double pi, h, sum;
    double tStart, tElapsed;

    h = 1.0 / (double) nSteps;
    sum = 0.0;

    tStart = getTime();
    #pragma omp parallel
    {
        printf("Thread rank: %d\n", omp_get_thread_num());
    }
    /* x must be private to each iteration (a shared x would race), and the
       midpoint of subinterval i in a 0-based loop is (i + 0.5)*h. */
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < nSteps; i++) {
        double x = h * ((double)i + 0.5);
        sum += 4.0 / (1.0 + x*x);
    }
    pi = h * sum;
    tElapsed = getTime() - tStart;

    printf("pi is approximately %.16f, Error is %.16f\n", pi, pi - PI25DT);
    printf("Computed in %.4g seconds\n", tElapsed);
    return 0;
}
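/* Equivalent formulation without the reduction clause, shown for clarity
   (a sketch, not part of the original program): each thread accumulates a
   private partial sum and the combination step is made explicit. */
double integrate_manual(long n)
{
    double h = 1.0 / (double) n;
    double sum = 0.0;
    #pragma omp parallel
    {
        double local = 0.0;                 /* private per-thread accumulator */
        #pragma omp for
        for (long i = 0; i < n; i++) {
            double x = h * ((double)i + 0.5);
            local += 4.0 / (1.0 + x*x);
        }
        #pragma omp atomic                  /* combine once per thread */
        sum += local;
    }
    return h * sum;
}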
papi.c
#include "XSbench_header.h" void counter_init( int *eventset, int *num_papi_events ) { char error_str[PAPI_MAX_STR_LEN]; // int events[] = {PAPI_TOT_INS,PAPI_BR_INS,PAPI_SR_INS}; int events[] = {PAPI_TOT_CYC,PAPI_L3_TCM}; int stat; int thread = omp_get_thread_num(); if( thread == 0 ) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){ PAPI_perror("PAPI_thread_init"); exit(1); } if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){ PAPI_perror("PAPI_create_eventset"); exit(1); } for( int i = 0; i < *num_papi_events; i++ ){ if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){ PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat=PAPI_start(*eventset)) != PAPI_OK){ PAPI_perror("PAPI_start"); exit(1); } } // Stops the papi counters and prints results void counter_stop( int * eventset, int num_papi_events ) { int * events = malloc(num_papi_events * sizeof(int)); int n = num_papi_events; PAPI_list_events( *eventset, events, &n ); PAPI_event_info_t info; long_long * values = malloc( num_papi_events * sizeof(long_long)); PAPI_stop(*eventset, values); int thread = omp_get_thread_num(); #pragma omp critical (papi) { printf("Thread %d\n", thread); for( int i = 0; i < num_papi_events; i++ ) { PAPI_get_event_info(events[i], &info); printf("%-15lld\t%s\t%s\n", values[i],info.symbol,info.long_descr); } free(events); free(values); } }
papi.c
#include "rsbench.h" void counter_init( int *eventset, int *num_papi_events ) { char error_str[PAPI_MAX_STR_LEN]; int stat; ///////////////////////////////////////////////////////////////////////// // PAPI EVENT SELECTION ///////////////////////////////////////////////////////////////////////// // User can comment/uncomment blocks as they see fit within this seciton // Some Standard Events //int events[] = {PAPI_TOT_INS,PAPI_LD_INS,PAPI_FP_INS}; // Bandwidth Used // ((PAPI_Lx_TCM * Lx_linesize) / PAPI_TOT_CYC) * Clock(MHz) //int events[] = {PAPI_L3_TCM, PAPI_TOT_CYC}; // L3 Total Cache Miss Ratio // PAPI_L3_TCM / PAPI_L3_TCA // (On Xeon dual octo - 65%, not dependent on # of threads) //int events[] = {PAPI_L3_TCM, PAPI_L3_TCA}; // % Cycles with no instruction use // PAPI_STL_ICY / PAPI_TOT_CYC //int events[] = { PAPI_STL_ICY, PAPI_TOT_CYC }; // % Branch instructions Mispredicted // PAPI_BR_MSP / PAPI_BR_CN //int events[] = { PAPI_BR_MSP, PAPI_BR_CN, PAPI_BR_PRC }; // TLB Misses //int events[] = { PAPI_TLB_DM }; // MFlops // (PAPI_FP_INS/PAPI_TOT_CYC) * Clock(MHz) //int events[] = { PAPI_FP_INS, PAPI_TOT_CYC }; // MFlops (Alternate?) // (PAPI_FP_INS/PAPI_TOT_CYC) * Clock(MHz) int events[] = { PAPI_DP_OPS, PAPI_TOT_CYC }; // TLB misses (Using native counters) /* int events[2]; int EventCode; char * event1 = "perf::DTLB-LOADS"; char * event2 = "perf::DTLB-LOAD-MISSES"; PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; */ /* // Stalled Cycles, front v back (Using native counters) int events[3]; int EventCode; char * event1 = "perf::STALLED-CYCLES-FRONTEND"; char * event2 = "perf::STALLED-CYCLES-BACKEND"; char * event3 = "perf::PERF_COUNT_HW_CPU_CYCLES"; PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; PAPI_event_name_to_code( event3, &EventCode ); events[2] = EventCode; */ /* // LLC Cache Misses (Using native counters) int events[2]; int EventCode; char * event1 = "ix86arch::LLC_REFERENCES"; char * event2 = "ix86arch::LLC_MISSES"; PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; */ /* // Node Prefetch Misses (Using native counters) int events[1]; int EventCode; //char * event1 = "perf::NODE-PREFETCHES"; //char * event2 = "perf::NODE-PREFETCH-MISSES"; char * event1 = "perf::NODE-PREFETCHES"; char * event2 = "perf::NODE-LOAD-MISSES:COUNT"; //PAPI_event_name_to_code( event1, &EventCode ); //events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[0] = EventCode; */ /* // CPU Stalls Due to lack of Load Buffers (Using native counters) int events[2]; int EventCode; char * event1 = "RESOURCE_STALLS:LB"; char * event2 = "perf::PERF_COUNT_HW_CPU_CYCLES"; PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; */ /* // CPU Stalls Due to ANY Resource (Using native counters) int events[2]; int EventCode; char * event1 = "RESOURCE_STALLS:ANY"; char * event2 = "PAPI_TOT_CYC"; PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; */ /* // CPU Stalls at Reservation Station (Using native counters) int events[2]; int EventCode; char * event1 = "RESOURCE_STALLS:RS"; char * event2 = "perf::PERF_COUNT_HW_CPU_CYCLES"; PAPI_event_name_to_code( event1, &EventCode 
); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; */ /* // CPU Stall Reason Breakdown (Using native counters) int events[4]; int EventCode; char * event1 = "RESOURCE_STALLS:ANY"; char * event2 = "RESOURCE_STALLS:LB"; char * event3 = "RESOURCE_STALLS:RS"; char * event4 = "RESOURCE_STALLS:SB"; // Set 1 // Set 2 char * event1 = "RESOURCE_STALLS:ANY"; char * event2 = "RESOURCE_STALLS:ROB"; char * event3 = "RESOURCE_STALLS:MEM_RS"; char * event4 = "RESOURCE_STALLS2:ALL_FL_EMPTY"; // Set 2 // Set 3 char * event1 = "RESOURCE_STALLS:ANY"; char * event2 = "RESOURCE_STALLS2:ALL_PRF_CONTROL"; char * event3 = "RESOURCE_STALLS2:ANY_PRF_CONTROL"; // duplicate char * event4 = "RESOURCE_STALLS2:OOO_RSRC"; // Set 3 // Events that don't need to be counted // Don't bother measuring these //char * event1 = "RESOURCE_STALLS:FCSW"; // Always 0, don't measure //char * event1 = "RESOURCE_STALLS:MXCSR"; // Always 0, don't measure //char * event3 = "RESOURCE_STALLS2:BOB_FULL"; // Always trivial //char * event3 = "RESOURCE_STALLS2:ANY_PRF_CONTROL"; // duplicate PAPI_event_name_to_code( event1, &EventCode ); events[0] = EventCode; PAPI_event_name_to_code( event2, &EventCode ); events[1] = EventCode; PAPI_event_name_to_code( event3, &EventCode ); events[2] = EventCode; PAPI_event_name_to_code( event4, &EventCode ); events[3] = EventCode; */ ///////////////////////////////////////////////////////////////////////// // PAPI EVENT LOADING ///////////////////////////////////////////////////////////////////////// // Users should not need to alter anything within this section int thread = omp_get_thread_num(); if( thread == 0 ) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){ PAPI_perror("PAPI_thread_init"); exit(1); } if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){ PAPI_perror("PAPI_create_eventset"); exit(1); } for( int i = 0; i < *num_papi_events; i++ ){ if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){ PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat=PAPI_start(*eventset)) != PAPI_OK){ PAPI_perror("PAPI_start"); exit(1); } } /* void counter_init( int *eventset, int *num_papi_events ) { char error_str[PAPI_MAX_STR_LEN]; // int events[] = {PAPI_TOT_INS,PAPI_BR_INS,PAPI_SR_INS}; int events[] = {PAPI_TOT_INS,PAPI_LD_INS,PAPI_FP_INS}; int events[] = {ix86arch::LLC_REFERENCES, int stat; int thread = omp_get_thread_num(); if( thread == 0 ) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){ PAPI_perror("PAPI_thread_init"); exit(1); } if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){ PAPI_perror("PAPI_create_eventset"); exit(1); } for( int i = 0; i < *num_papi_events; i++ ){ if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){ PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat=PAPI_start(*eventset)) != PAPI_OK){ PAPI_perror("PAPI_start"); exit(1); } } */ // Stops the papi counters and prints results void counter_stop( int * eventset, int num_papi_events ) { int * events = malloc(num_papi_events * sizeof(int)); int n = num_papi_events; PAPI_list_events( *eventset, events, &n ); PAPI_event_info_t info; long_long * values = malloc( num_papi_events * sizeof(long_long)); PAPI_stop(*eventset, values); int thread = omp_get_thread_num(); #pragma omp critical (papi) { printf("Thread 
%d\n", thread); for( int i = 0; i < num_papi_events; i++ ) { PAPI_get_event_info(events[i], &info); printf("%-15lld\t%s\t%s\n", values[i],info.symbol,info.long_descr); } free(events); free(values); } }
hello.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #define DEF_N 10 int main(int argc, char **argv) { int def_nthreads, nthreads, tid; if (argc > 1) { def_nthreads = atoi(argv[1]); } else { def_nthreads = DEF_N; } omp_set_num_threads(def_nthreads); #pragma omp parallel private(nthreads, tid) { tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } return 0; }
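/* Variant sketch (not part of the original): because nthreads is private
   in the parallel region above, its value does not survive the region. To
   capture the team size into the enclosing scope, leave it shared and
   write it from a single thread. */
int team_size(void)
{
    int nthreads = 1;
    #pragma omp parallel
    {
        #pragma omp single
        nthreads = omp_get_num_threads();
    }
    return nthreads;
}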
DRACC_OMP_008_Counter_no_lock_simd_Intra_yes.c
/* Concurrent access on a counter with no lock with simd. Atomicity Violation. Data Race in line 28. Intra Region. */ #include <stdio.h> #include <stdbool.h> #define N 100000 #define C 64 int countervar[C]; int init(){ for(int i=0; i<C; i++){ countervar[i]=0; } return 0; } int count(){ #pragma omp target map(tofrom:countervar[0:C]) device(0) #pragma omp teams num_teams(1) #pragma omp distribute parallel for for (int i=0; i<N; i++){ #pragma omp simd for(int i=0; i<C; i++){ countervar[i]++; } } return 0; } int check(){ bool test = false; for(int i=0; i<C; i++){ if(countervar[i]!=100000){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } int main(){ init(); count(); check(); return 0; }
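/* For contrast, a race-free variant of count() (a sketch, not part of the
   benchmark; the code above intentionally keeps the race). Every iteration
   of the distributed loop increments all C counters, so the updates must
   be atomic; the simd directive is dropped here for clarity. */
int count_fixed(){
    #pragma omp target map(tofrom:countervar[0:C]) device(0)
    #pragma omp teams num_teams(1)
    #pragma omp distribute parallel for
    for (int i=0; i<N; i++){
        for(int j=0; j<C; j++){
            #pragma omp atomic
            countervar[j]++;
        }
    }
    return 0;
}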
openmp_2k.c
// C program for implementation of selection sort #include <stdio.h> #include <time.h> #include "omp.h" void swap(int *xp, int *yp) { int temp = *xp; *xp = *yp; *yp = temp; } void selectionSort(int arr[], int n) { int i=0, j=0, min_i=0; // One by one move boundary of unsorted subarray for (i = 0; i < n-1; ++i) { // Find the minimum element in unsorted array min_i=i; #pragma omp parallel for reduction(min:min_i) for (j = i+1; j < n; ++j){ min_i = arr[j]; } #pragma omp parallel for for (j = i+1; j < n; ++j) { if (arr[j] == min_i) min_i = j; } // Swap the found minimum element with the first element //offset so first element is in the correct place if(i>0 && min_i!=i && i<=(n/2)) swap(&arr[min_i], &arr[i-1]); } } /* Function to print an array */ void printArray(int arr[], int size) { int i; for (i=0; i < size; i++) printf("%d ", arr[i]); printf("\n"); } // Driver program to test above functions int main() { double begin,end; begin=omp_get_wtime(); //really big array...inefficient with serial code int arr[] = {2000, 1999, 1998, 1997, 1996, 1995, 1994, 1993, 1992, 1991, 1990, 1989, 1988, 1987, 1986, 1985, 1984, 1983, 1982, 1981, 1980, 1979, 1978, 1977, 1976, 1975, 1974, 1973, 1972, 1971, 1970, 1969, 1968, 1967, 1966, 1965, 1964, 1963, 1962, 1961, 1960, 1959, 1958, 1957, 1956, 1955, 1954, 1953, 1952, 1951, 1950, 1949, 1948, 1947, 1946, 1945, 1944, 1943, 1942, 1941, 1940, 1939, 1938, 1937, 1936, 1935, 1934, 1933, 1932, 1931, 1930, 1929, 1928, 1927, 1926, 1925, 1924, 1923, 1922, 1921, 1920, 1919, 1918, 1917, 1916, 1915, 1914, 1913, 1912, 1911, 1910, 1909, 1908, 1907, 1906, 1905, 1904, 1903, 1902, 1901, 1900, 1899, 1898, 1897, 1896, 1895, 1894, 1893, 1892, 1891, 1890, 1889, 1888, 1887, 1886, 1885, 1884, 1883, 1882, 1881, 1880, 1879, 1878, 1877, 1876, 1875, 1874, 1873, 1872, 1871, 1870, 1869, 1868, 1867, 1866, 1865, 1864, 1863, 1862, 1861, 1860, 1859, 1858, 1857, 1856, 1855, 1854, 1853, 1852, 1851, 1850, 1849, 1848, 1847, 1846, 1845, 1844, 1843, 1842, 1841, 1840, 1839, 1838, 1837, 1836, 1835, 1834, 1833, 1832, 1831, 1830, 1829, 1828, 1827, 1826, 1825, 1824, 1823, 1822, 1821, 1820, 1819, 1818, 1817, 1816, 1815, 1814, 1813, 1812, 1811, 1810, 1809, 1808, 1807, 1806, 1805, 1804, 1803, 1802, 1801, 1800, 1799, 1798, 1797, 1796, 1795, 1794, 1793, 1792, 1791, 1790, 1789, 1788, 1787, 1786, 1785, 1784, 1783, 1782, 1781, 1780, 1779, 1778, 1777, 1776, 1775, 1774, 1773, 1772, 1771, 1770, 1769, 1768, 1767, 1766, 1765, 1764, 1763, 1762, 1761, 1760, 1759, 1758, 1757, 1756, 1755, 1754, 1753, 1752, 1751, 1750, 1749, 1748, 1747, 1746, 1745, 1744, 1743, 1742, 1741, 1740, 1739, 1738, 1737, 1736, 1735, 1734, 1733, 1732, 1731, 1730, 1729, 1728, 1727, 1726, 1725, 1724, 1723, 1722, 1721, 1720, 1719, 1718, 1717, 1716, 1715, 1714, 1713, 1712, 1711, 1710, 1709, 1708, 1707, 1706, 1705, 1704, 1703, 1702, 1701, 1700, 1699, 1698, 1697, 1696, 1695, 1694, 1693, 1692, 1691, 1690, 1689, 1688, 1687, 1686, 1685, 1684, 1683, 1682, 1681, 1680, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 1672, 1671, 1670, 1669, 1668, 1667, 1666, 1665, 1664, 1663, 1662, 1661, 1660, 1659, 1658, 1657, 1656, 1655, 1654, 1653, 1652, 1651, 1650, 1649, 1648, 1647, 1646, 1645, 1644, 1643, 1642, 1641, 1640, 1639, 1638, 1637, 1636, 1635, 1634, 1633, 1632, 1631, 1630, 1629, 1628, 1627, 1626, 1625, 1624, 1623, 1622, 1621, 1620, 1619, 1618, 1617, 1616, 1615, 1614, 1613, 1612, 1611, 1610, 1609, 1608, 1607, 1606, 1605, 1604, 1603, 1602, 1601, 1600, 1599, 1598, 1597, 1596, 1595, 1594, 1593, 1592, 1591, 1590, 1589, 1588, 1587, 1586, 1585, 1584, 1583, 1582, 1581, 1580, 1579, 
1578, 1577, 1576, 1575, 1574, 1573, 1572, 1571, 1570, 1569, 1568, 1567, 1566, 1565, 1564, 1563, 1562, 1561, 1560, 1559, 1558, 1557, 1556, 1555, 1554, 1553, 1552, 1551, 1550, 1549, 1548, 1547, 1546, 1545, 1544, 1543, 1542, 1541, 1540, 1539, 1538, 1537, 1536, 1535, 1534, 1533, 1532, 1531, 1530, 1529, 1528, 1527, 1526, 1525, 1524, 1523, 1522, 1521, 1520, 1519, 1518, 1517, 1516, 1515, 1514, 1513, 1512, 1511, 1510, 1509, 1508, 1507, 1506, 1505, 1504, 1503, 1502, 1501, 1500, 1499, 1498, 1497, 1496, 1495, 1494, 1493, 1492, 1491, 1490, 1489, 1488, 1487, 1486, 1485, 1484, 1483, 1482, 1481, 1480, 1479, 1478, 1477, 1476, 1475, 1474, 1473, 1472, 1471, 1470, 1469, 1468, 1467, 1466, 1465, 1464, 1463, 1462, 1461, 1460, 1459, 1458, 1457, 1456, 1455, 1454, 1453, 1452, 1451, 1450, 1449, 1448, 1447, 1446, 1445, 1444, 1443, 1442, 1441, 1440, 1439, 1438, 1437, 1436, 1435, 1434, 1433, 1432, 1431, 1430, 1429, 1428, 1427, 1426, 1425, 1424, 1423, 1422, 1421, 1420, 1419, 1418, 1417, 1416, 1415, 1414, 1413, 1412, 1411, 1410, 1409, 1408, 1407, 1406, 1405, 1404, 1403, 1402, 1401, 1400, 1399, 1398, 1397, 1396, 1395, 1394, 1393, 1392, 1391, 1390, 1389, 1388, 1387, 1386, 1385, 1384, 1383, 1382, 1381, 1380, 1379, 1378, 1377, 1376, 1375, 1374, 1373, 1372, 1371, 1370, 1369, 1368, 1367, 1366, 1365, 1364, 1363, 1362, 1361, 1360, 1359, 1358, 1357, 1356, 1355, 1354, 1353, 1352, 1351, 1350, 1349, 1348, 1347, 1346, 1345, 1344, 1343, 1342, 1341, 1340, 1339, 1338, 1337, 1336, 1335, 1334, 1333, 1332, 1331, 1330, 1329, 1328, 1327, 1326, 1325, 1324, 1323, 1322, 1321, 1320, 1319, 1318, 1317, 1316, 1315, 1314, 1313, 1312, 1311, 1310, 1309, 1308, 1307, 1306, 1305, 1304, 1303, 1302, 1301, 1300, 1299, 1298, 1297, 1296, 1295, 1294, 1293, 1292, 1291, 1290, 1289, 1288, 1287, 1286, 1285, 1284, 1283, 1282, 1281, 1280, 1279, 1278, 1277, 1276, 1275, 1274, 1273, 1272, 1271, 1270, 1269, 1268, 1267, 1266, 1265, 1264, 1263, 1262, 1261, 1260, 1259, 1258, 1257, 1256, 1255, 1254, 1253, 1252, 1251, 1250, 1249, 1248, 1247, 1246, 1245, 1244, 1243, 1242, 1241, 1240, 1239, 1238, 1237, 1236, 1235, 1234, 1233, 1232, 1231, 1230, 1229, 1228, 1227, 1226, 1225, 1224, 1223, 1222, 1221, 1220, 1219, 1218, 1217, 1216, 1215, 1214, 1213, 1212, 1211, 1210, 1209, 1208, 1207, 1206, 1205, 1204, 1203, 1202, 1201, 1200, 1199, 1198, 1197, 1196, 1195, 1194, 1193, 1192, 1191, 1190, 1189, 1188, 1187, 1186, 1185, 1184, 1183, 1182, 1181, 1180, 1179, 1178, 1177, 1176, 1175, 1174, 1173, 1172, 1171, 1170, 1169, 1168, 1167, 1166, 1165, 1164, 1163, 1162, 1161, 1160, 1159, 1158, 1157, 1156, 1155, 1154, 1153, 1152, 1151, 1150, 1149, 1148, 1147, 1146, 1145, 1144, 1143, 1142, 1141, 1140, 1139, 1138, 1137, 1136, 1135, 1134, 1133, 1132, 1131, 1130, 1129, 1128, 1127, 1126, 1125, 1124, 1123, 1122, 1121, 1120, 1119, 1118, 1117, 1116, 1115, 1114, 1113, 1112, 1111, 1110, 1109, 1108, 1107, 1106, 1105, 1104, 1103, 1102, 1101, 1100, 1099, 1098, 1097, 1096, 1095, 1094, 1093, 1092, 1091, 1090, 1089, 1088, 1087, 1086, 1085, 1084, 1083, 1082, 1081, 1080, 1079, 1078, 1077, 1076, 1075, 1074, 1073, 1072, 1071, 1070, 1069, 1068, 1067, 1066, 1065, 1064, 1063, 1062, 1061, 1060, 1059, 1058, 1057, 1056, 1055, 1054, 1053, 1052, 1051, 1050, 1049, 1048, 1047, 1046, 1045, 1044, 1043, 1042, 1041, 1040, 1039, 1038, 1037, 1036, 1035, 1034, 1033, 1032, 1031, 1030, 1029, 1028, 1027, 1026, 1025, 1024, 1023, 1022, 1021, 1020, 1019, 1018, 1017, 1016, 1015, 1014, 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 1000, 999, 998, 997, 996, 995, 994, 993, 992, 991, 990, 989, 988, 987, 986, 985, 984, 
983, 982, 981, 980, 979, 978, 977, 976, 975, 974, 973, 972, 971, 970, 969, 968, 967, 966, 965, 964, 963, 962, 961, 960, 959, 958, 957, 956, 955, 954, 953, 952, 951, 950, 949, 948, 947, 946, 945, 944, 943, 942, 941, 940, 939, 938, 937, 936, 935, 934, 933, 932, 931, 930, 929, 928, 927, 926, 925, 924, 923, 922, 921, 920, 919, 918, 917, 916, 915, 914, 913, 912, 911, 910, 909, 908, 907, 906, 905, 904, 903, 902, 901, 900, 899, 898, 897, 896, 895, 894, 893, 892, 891, 890, 889, 888, 887, 886, 885, 884, 883, 882, 881, 880, 879, 878, 877, 876, 875, 874, 873, 872, 871, 870, 869, 868, 867, 866, 865, 864, 863, 862, 861, 860, 859, 858, 857, 856, 855, 854, 853, 852, 851, 850, 849, 848, 847, 846, 845, 844, 843, 842, 841, 840, 839, 838, 837, 836, 835, 834, 833, 832, 831, 830, 829, 828, 827, 826, 825, 824, 823, 822, 821, 820, 819, 818, 817, 816, 815, 814, 813, 812, 811, 810, 809, 808, 807, 806, 805, 804, 803, 802, 801, 800, 799, 798, 797, 796, 795, 794, 793, 792, 791, 790, 789, 788, 787, 786, 785, 784, 783, 782, 781, 780, 779, 778, 777, 776, 775, 774, 773, 772, 771, 770, 769, 768, 767, 766, 765, 764, 763, 762, 761, 760, 759, 758, 757, 756, 755, 754, 753, 752, 751, 750, 749, 748, 747, 746, 745, 744, 743, 742, 741, 740, 739, 738, 737, 736, 735, 734, 733, 732, 731, 730, 729, 728, 727, 726, 725, 724, 723, 722, 721, 720, 719, 718, 717, 716, 715, 714, 713, 712, 711, 710, 709, 708, 707, 706, 705, 704, 703, 702, 701, 700, 699, 698, 697, 696, 695, 694, 693, 692, 691, 690, 689, 688, 687, 686, 685, 684, 683, 682, 681, 680, 679, 678, 677, 676, 675, 674, 673, 672, 671, 670, 669, 668, 667, 666, 665, 664, 663, 662, 661, 660, 659, 658, 657, 656, 655, 654, 653, 652, 651, 650, 649, 648, 647, 646, 645, 644, 643, 642, 641, 640, 639, 638, 637, 636, 635, 634, 633, 632, 631, 630, 629, 628, 627, 626, 625, 624, 623, 622, 621, 620, 619, 618, 617, 616, 615, 614, 613, 612, 611, 610, 609, 608, 607, 606, 605, 604, 603, 602, 601, 600, 599, 598, 597, 596, 595, 594, 593, 592, 591, 590, 589, 588, 587, 586, 585, 584, 583, 582, 581, 580, 579, 578, 577, 576, 575, 574, 573, 572, 571, 570, 569, 568, 567, 566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549, 548, 547, 546, 545, 544, 543, 542, 541, 540, 539, 538, 537, 536, 535, 534, 533, 532, 531, 530, 529, 528, 527, 526, 525, 524, 523, 522, 521, 520, 519, 518, 517, 516, 515, 514, 513, 512, 511, 510, 509, 508, 507, 506, 505, 504, 503, 502, 501, 500, 499, 498, 497, 496, 495, 494, 493, 492, 491, 490, 489, 488, 487, 486, 485, 484, 483, 482, 481, 480, 479, 478, 477, 476, 475, 474, 473, 472, 471, 470, 469, 468, 467, 466, 465, 464, 463, 462, 461, 460, 459, 458, 457, 456, 455, 454, 453, 452, 451, 450, 449, 448, 447, 446, 445, 444, 443, 442, 441, 440, 439, 438, 437, 436, 435, 434, 433, 432, 431, 430, 429, 428, 427, 426, 425, 424, 423, 422, 421, 420, 419, 418, 417, 416, 415, 414, 413, 412, 411, 410, 409, 408, 407, 406, 405, 404, 403, 402, 401, 400, 399, 398, 397, 396, 395, 394, 393, 392, 391, 390, 389, 388, 387, 386, 385, 384, 383, 382, 381, 380, 379, 378, 377, 376, 375, 374, 373, 372, 371, 370, 369, 368, 367, 366, 365, 364, 363, 362, 361, 360, 359, 358, 357, 356, 355, 354, 353, 352, 351, 350, 349, 348, 347, 346, 345, 344, 343, 342, 341, 340, 339, 338, 337, 336, 335, 334, 333, 332, 331, 330, 329, 328, 327, 326, 325, 324, 323, 322, 321, 320, 319, 318, 317, 316, 315, 314, 313, 312, 311, 310, 309, 308, 307, 306, 305, 304, 303, 302, 301, 300, 299, 298, 297, 296, 295, 294, 293, 292, 291, 290, 289, 288, 287, 286, 285, 284, 283, 282, 281, 280, 279, 278, 277, 276, 275, 274, 273, 
272, 271, 270, 269, 268, 267, 266, 265, 264, 263, 262, 261, 260, 259, 258, 257, 256, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224, 223, 222, 221, 220, 219, 218, 217, 216, 215, 214, 213, 212, 211, 210, 209, 208, 207, 206, 205, 204, 203, 202, 201, 200, 199, 198, 197, 196, 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173, 172, 171, 170, 169, 168, 167, 166, 165, 164, 163, 162, 161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151, 150, 149, 148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; int n = sizeof(arr)/sizeof(arr[0]); selectionSort(arr, n); printf("Sorted array: \n"); printArray(arr, n); end=omp_get_wtime(); printf("Selection sort openMP parallel code took %f seconds \n", end-begin); return 0; }
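/* The parallel loops in selectionSort above misuse reduction(min:min_i):
   min_i holds an index but is assigned element values, and the follow-up
   index-recovery loop races on min_i. A sketch of a correct parallel
   selection step (not part of the original): each thread keeps a local
   argmin over its chunk and the results are combined in a critical
   section, then the minimum is swapped into position i. */
void selectionSortFixed(int arr[], int n)
{
    for (int i = 0; i < n - 1; ++i) {
        int min_i = i;
        #pragma omp parallel
        {
            int local_min = i;              /* thread-local argmin */
            #pragma omp for nowait
            for (int j = i + 1; j < n; ++j)
                if (arr[j] < arr[local_min])
                    local_min = j;
            #pragma omp critical
            if (arr[local_min] < arr[min_i])
                min_i = local_min;          /* combine per-thread results */
        }
        if (min_i != i)
            swap(&arr[min_i], &arr[i]);
    }
}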
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 524288UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait == UndefinedPixelTrait) && (draw_info->fill.alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. */ floodplane_image=CloneImage(image,0,0,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. */ status=MagickTrue; start=0; s=segment_stack; PushSegmentStack(y_offset,x_offset,x_offset,1); PushSegmentStack(y_offset+1,x_offset,x_offset,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Tile fill color onto floodplane. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelViaPixelInfo(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. 
% % Note, the interface of this method will change in the future to support % more than one transistion. % % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelInfo *start_color, % const PixelInfo *stop_color,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method,const StopInfo *stops, const size_t number_stops,ExceptionInfo *exception) { const char *artifact; DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(stops != (const StopInfo *) NULL); assert(number_stops > 0); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; artifact=GetImageArtifact(image,"gradient:bounding-box"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box); gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; artifact=GetImageArtifact(image,"gradient:direction"); if (artifact != (const char *) NULL) { GravityType direction; direction=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,artifact); switch (direction) { case NorthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case WestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case EastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case SouthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->columns-1; break; } case SouthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; break; } default: break; } } 
artifact=GetImageArtifact(image,"gradient:angle"); if (artifact != (const char *) NULL) gradient->angle=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"gradient:vector"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf", &gradient->gradient_vector.x1,&gradient->gradient_vector.y1, &gradient->gradient_vector.x2,&gradient->gradient_vector.y2); if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) && (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) && (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) && (GetImageArtifact(image,"gradient:vector") == (const char *) NULL)) if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; artifact=GetImageArtifact(image,"gradient:center"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x, &gradient->center.y); artifact=GetImageArtifact(image,"gradient:angle"); if ((type == LinearGradient) && (artifact != (const char *) NULL)) { double sine, cosine, distance; /* Reference https://drafts.csswg.org/css-images-3/#linear-gradients. */ sine=sin((double) DegreesToRadians(gradient->angle-90.0)); cosine=cos((double) DegreesToRadians(gradient->angle-90.0)); distance=fabs((double) (image->columns-1.0)*cosine)+ fabs((double) (image->rows-1.0)*sine); gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine); gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine); gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine); gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine); } gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/ 2.0; gradient->radii.y=gradient->radii.x; artifact=GetImageArtifact(image,"gradient:extent"); if (artifact != (const char *) NULL) { if (LocaleCompare(artifact,"Circle") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Diagonal") == 0) { gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)* (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Ellipse") == 0) { gradient->radii.x=(double) (image->columns-1.0)/2.0; gradient->radii.y=(double) (image->rows-1.0)/2.0; } if (LocaleCompare(artifact,"Maximum") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Minimum") == 0) { gradient->radii.x=(double) (MagickMin((image->columns-1.0), (image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } } artifact=GetImageArtifact(image,"gradient:radii"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x, &gradient->radii.y); gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y); gradient->spread=method; /* Define the gradient to fill between the stops. 
*/ gradient->number_stops=number_stops; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memcpy(gradient->stops,stops,(size_t) number_stops* sizeof(*stops)); /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
    (width/2L)+GetPixelChannels(linear_image)*(width/2L);
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register size_t
      *histogram;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        n,
        v;

      /*
        Assign most frequent color.
*/ k=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel = GetPixelChannelChannel(linear_image,i); PixelTrait traits = GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if ((paint_traits & CopyPixelTrait) != 0) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill argument. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. % % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target, % const PixelInfo *fill,const MagickBooleanType invert, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert, ExceptionInfo *exception) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo conform_fill, conform_target, zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (PixelInfo *) NULL); assert(fill != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); ConformPixelInfo(image,fill,&conform_fill,exception); ConformPixelInfo(image,target,&conform_target,exception); /* Make image color opaque. */ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert) { PixelTrait traits; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(image,(Quantum) conform_fill.red,q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(image,(Quantum) conform_fill.green,q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(image,(Quantum) conform_fill.blue,q); traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlack(image,(Quantum) conform_fill.black,q); traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelAlpha(image,(Quantum) conform_fill.alpha,q); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImage) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. 
% % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const PixelInfo *target,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert, ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. */ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImage) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. % % As there is one fuzz value for the all the channels, TransparentPaintImage() % is not suitable for the operations like chroma, where the tolerance for % similarity of two color component (RGB) can be different. Thus we define % this method to take two target pixels (one low and one high) and all the % pixels of an image which are lying between these two pixels are made % transparent. 
% % The format of the TransparentPaintImageChroma method is: % % MagickBooleanType TransparentPaintImageChroma(Image *image, % const PixelInfo *low,const PixelInfo *high,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low: the low target color. % % o high: the high target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image, const PixelInfo *low,const PixelInfo *high,const Quantum opacity, const MagickBooleanType invert,ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(high != (PixelInfo *) NULL); assert(low != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType match; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); match=((pixel.red >= low->red) && (pixel.red <= high->red) && (pixel.green >= low->green) && (pixel.green <= high->green) && (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse; if (match != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImageChroma) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
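/* Usage sketch (a separate demo program, not part of paint.c): read an
   image, run OilPaintImage() from above, and write the result. The file
   paths and the radius/sigma values are illustrative assumptions. */
#include "MagickCore/MagickCore.h"

int oil_paint_demo(const char *in_path,const char *out_path)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *paint;

  ImageInfo
    *info;

  MagickCoreGenesis("oil_paint_demo",MagickFalse);
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,in_path,MagickPathExtent);
  image=ReadImage(info,exception);
  if (image != (Image *) NULL)
    {
      paint=OilPaintImage(image,4.0,1.0,exception);  /* radius=4, sigma=1 */
      image=DestroyImage(image);
      if (paint != (Image *) NULL)
        {
          (void) CopyMagickString(paint->filename,out_path,MagickPathExtent);
          (void) WriteImage(info,paint,exception);
          paint=DestroyImage(paint);
        }
    }
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}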
additionally.c
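/*
  The binary (XNOR-net) GEMMs in this file all rely on one identity: if two
  vectors with entries in {-1,+1} are bit-packed (bit set = +1, bit clear = -1),
  their dot product over K lanes equals 2*popcount(xnor(a,b)) - K, which is
  then rescaled by a per-filter mean magnitude.  A minimal self-contained
  sketch of that identity (illustrative only, kept out of the build):
*/
#if 0
#include <stdint.h>
#include <assert.h>
static void xnor_dot_identity_demo(void)
{
    const int K = 8;
    uint8_t a_bits = 0xB2;  /* lanes bit0..bit7: -1 +1 -1 -1 +1 +1 -1 +1 */
    uint8_t b_bits = 0xE4;  /* lanes bit0..bit7: -1 -1 +1 -1 -1 +1 +1 +1 */
    int dot = 0, count = 0, i;
    for (i = 0; i < K; ++i) {
        int av = ((a_bits >> i) & 1) ? 1 : -1;
        int bv = ((b_bits >> i) & 1) ? 1 : -1;
        dot += av * bv;                     /* direct +/-1 dot product */
    }
    uint8_t xnor = (uint8_t)~(a_bits ^ b_bits);
    for (i = 0; i < K; ++i) count += (xnor >> i) & 1;
    assert(dot == 2 * count - K);           /* here: dot == 0, count == 4 */
}
#endif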
#include "additionally.h" #include "gpu.h" #ifdef OPENCL #include "ocl.h" #endif #ifdef CUDNN #pragma comment(lib, "cudnn.lib") #endif #ifdef _OPENMP #include <omp.h> #endif #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" // global GPU index: cuda.c int gpu_index = 0; // im2col.c float im2col_get_pixel(float *im, int height, int width, int channels, int row, int col, int channel, int pad) { row -= pad; col -= pad; if (row < 0 || col < 0 || row >= height || col >= width) return 0; return im[col + width*(row + height*channel)]; } // im2col.c //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c, h, w; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } // -------------- my own -------------- // fuse convolutional and batch_norm weights into one convolutional-layer void yolov2_fuse_conv_batchnorm(network net) { int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; if (l->type == CONVOLUTIONAL) { //printf(" Fuse Convolutional layer \t\t l->size = %d \n", l->size); if (l->batch_normalize) { int f; for (f = 0; f < l->n; ++f) { l->biases[f] = l->biases[f] - l->scales[f] * l->rolling_mean[f] / (sqrtf(l->rolling_variance[f]) + .000001f); const size_t filter_size = l->size*l->size*l->c; int i; for (i = 0; i < filter_size; ++i) { int w_index = f*filter_size + i; l->weights[w_index] = l->weights[w_index] * l->scales[f] / (sqrtf(l->rolling_variance[f]) + .000001f); } } l->batch_normalize = 0; #ifdef GPU if (gpu_index >= 0) { push_convolutional_layer(*l); } #endif #ifdef OPENCL //if (gpu_index >= 0) { ocl_push_convolutional_layer(*l); //} #endif } } else { //printf(" Skip layer: %d \n", l->type); } } } // -------------- XNOR-net ------------ void binarize_weights(float *weights, int n, int size, float *binary) { int i, f; for (f = 0; f < n; ++f) { float mean = 0; for (i = 0; i < size; ++i) { mean += fabs(weights[f*size + i]); } mean = mean / size; for (i = 0; i < size; ++i) { binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; } } } void binarize_cpu(float *input, int n, float *binary) { int i; for (i = 0; i < n; ++i) { binary[i] = (input[i] > 0) ? 
1 : -1; } } static inline unsigned char xnor(unsigned char a, unsigned char b) { //return a == b; return !(a^b); } // INT-32 static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) { size_t src_i = index / 32; int src_shift = index % 32; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } static inline uint32_t fill_bit_int32(char src) { if (src == 0) return 0x00000000; else return 0xFFFFFFFF; } static inline uint64_t fill_bit_int64(char src) { if (src == 0) return 0x0000000000000000; else return 0xFFFFFFFFFFFFFFFF; } void binary_int32_printf(uint32_t src) { int i; for (i = 0; i < 32; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void binary_int64_printf(uint64_t src) { int i; for (i = 0; i < 64; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void get_mean_array(float *src, size_t size, size_t filters, float *mean_arr) { size_t i, counter; counter = 0; for (i = 0; i < size; i += size / filters) { mean_arr[counter++] = fabs(src[i]); } } void binary_align_weights(convolutional_layer *l) { int m = l->n; int k = l->size*l->size*l->c; size_t new_lda = k + (l->lda_align - k % l->lda_align); // (k / 8 + 1) * 8; l->new_lda = new_lda; binarize_weights(l->weights, m, k, l->binary_weights); size_t align_weights_size = new_lda * m; l->align_bit_weights_size = align_weights_size / 8 + 1; float *align_weights = calloc(align_weights_size, sizeof(float)); l->align_bit_weights = calloc(l->align_bit_weights_size, sizeof(char)); size_t i, j; // align A without transpose for (i = 0; i < m; ++i) { for (j = 0; j < k; ++j) { align_weights[i*new_lda + j] = l->binary_weights[i*k + j]; } } if (l->c % 32 == 0) //if(gpu_index < 0 && l->stride == 1 && l->pad == 1 && l->c % 32 == 0) //if (l->stride == 1 && l->pad == 1 && l->c % 32 == 0) { int fil, chan; const int items_per_filter = l->c * l->size * l->size; //const int dst_items_per_filter = new_lda; for (fil = 0; fil < l->n; ++fil) { for (chan = 0; chan < l->c; chan += 32) { const int items_per_channel = l->size*l->size; for (i = 0; i < items_per_channel; ++i) { uint32_t val = 0; int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float src = l->binary_weights[fil*items_per_filter + (chan + c_pack)*items_per_channel + i]; //align_weights[fil*items_per_filter + chan*items_per_channel + i * 32 + c_pack] = src; align_weights[fil*new_lda + chan*items_per_channel + i * 32 + c_pack] = src; //val |= (src << c); } } } } //printf("\n l.index = %d \t aw[0] = %f, aw[1] = %f, aw[2] = %f, aw[3] = %f \n", l->index, align_weights[0], align_weights[1], align_weights[2], align_weights[3]); //memcpy(l->binary_weights, align_weights, (l->size * l->size * l->c * l->n) * sizeof(float)); float_to_bit(align_weights, l->align_bit_weights, align_weights_size); //if (l->n >= 32) if (gpu_index >= 0) { int M = l->n; int N = l->out_w*l->out_h; //printf("\n M = %d, N = %d, M %% 8 = %d, N %% 8 = %d - weights \n", M, N, M % 8, N % 8); //printf("\n l.w = %d, l.c = %d, l.n = %d \n", l->w, l->c, l->n); for (i = 0; i < align_weights_size / 8; ++i) l->align_bit_weights[i] = ~(l->align_bit_weights[i]); } get_mean_array(l->binary_weights, m*k, l->n, l->mean_arr); //get_mean_array(l->binary_weights, m*new_lda, l->n, l->mean_arr); } else { float_to_bit(align_weights, l->align_bit_weights, align_weights_size); 
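            /* fallback (l->c not a multiple of 32): weights are only bit-packed
               row by row.  binarize_weights() above stored +/-mean(|w|) in
               binary_weights for every entry of a filter, so get_mean_array()
               below recovers each filter's scale factor by sampling a single
               entry per filter; these factors drive the (2*popcount - K) * mean
               rescaling in the binary GEMMs. */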
get_mean_array(l->binary_weights, m*k, l->n, l->mean_arr); } //l->mean_arr = calloc(l->n, sizeof(float)); //get_mean_array(align_weights, align_weights_size, l->n, l->mean_arr); #ifdef GPU cudaError_t status; l->align_workspace_size = l->bit_align * l->size * l->size * l->c; status = cudaMalloc((void **)&l->align_workspace_gpu, l->align_workspace_size * sizeof(float)); status = cudaMalloc((void **)&l->transposed_align_workspace_gpu, l->align_workspace_size * sizeof(float)); CHECK_CUDA(status); //l->align_bit_weights_gpu = cuda_make_array(l->align_bit_weights, l->align_bit_weights_size * sizeof(char)/sizeof(float)); status = cudaMalloc((void **)&l->align_bit_weights_gpu, l->align_bit_weights_size); CHECK_CUDA(status); status = cudaMemcpy(l->align_bit_weights_gpu, l->align_bit_weights, l->align_bit_weights_size, cudaMemcpyHostToDevice); CHECK_CUDA(status); status = cudaMemcpy(l->binary_weights_gpu, l->binary_weights, m*k * sizeof(float), cudaMemcpyHostToDevice); CHECK_CUDA(status); //l->mean_arr_gpu = cuda_make_array(l->mean_arr, l->n); cuda_push_array(l->mean_arr_gpu, l->mean_arr, l->n); CHECK_CUDA(cudaDeviceSynchronize()); #endif // GPU free(align_weights); } void forward_blank_layer(layer l, network_state state) {} void calculate_binary_weights(network net) { int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; if (l->type == CONVOLUTIONAL) { //printf(" Merges Convolutional-%d and batch_norm \n", j); if (l->xnor) { //printf("\n %d \n", j); //l->lda_align = 256; // 256bit for AVX2 // set in make_convolutional_layer() //if (l->size*l->size*l->c >= 2048) l->lda_align = 512; binary_align_weights(l); if (net.layers[j].use_bin_output) { //l->activation = LINEAR; } #ifdef GPU // fuse conv_xnor + shortcut -> conv_xnor if ((j + 1) < net.n && net.layers[j].type == CONVOLUTIONAL) { layer *sc = &net.layers[j + 1]; if (sc->type == SHORTCUT && sc->w == sc->out_w && sc->h == sc->out_h && sc->c == sc->out_c) { l->bin_conv_shortcut_in_gpu = net.layers[net.layers[j + 1].index].output_gpu; l->bin_conv_shortcut_out_gpu = net.layers[j + 1].output_gpu; net.layers[j + 1].type = BLANK; net.layers[j + 1].forward_gpu = forward_blank_layer; } } #endif // GPU } } } //printf("\n calculate_binary_weights Done! \n"); } static inline void set_bit(unsigned char *const dst, size_t index) { size_t dst_i = index / 8; int dst_shift = index % 8; dst[dst_i] |= 1 << dst_shift; } static inline unsigned char get_bit(unsigned char const*const src, size_t index) { size_t src_i = index / 8; int src_shift = index % 8; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } /* static inline unsigned char reverse_byte_1(char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } static inline unsigned char reverse_byte(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } static unsigned char lookup[16] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, }; static inline unsigned char reverse_byte_3(unsigned char n) { // Reverse the top and bottom nibble then swap them. return (lookup[n & 0b1111] << 4) | lookup[n >> 4]; } static inline void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B) { unsigned x, y, t; // Load the array and pack it into x and y. 
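   NOTE: this disabled 8x8 bit-transpose follows the classic transpose8 mask
   ladder from Hacker's Delight (swap with 0x00AA00AA, then 0x0000CCCC, then
   the nibble merge); the active build uses the 32x32 variant
   transpose_32x32_bits_reversed_diagonale() defined after this comment block.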
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x); B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y); } void transpose_bin(char *A, char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += 8) { int j; for (j = 0; j < m - 8; j += 8) { int a_index = i*lda + j; int b_index = j*ldb + i; //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8); transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } */ uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } // popcnt 32 bit static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = 
__popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) // out_h*out_w; { float val = 0; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = ((uint32_t*)A)[i*lda + s]; register uint32_t B_PART = ((uint32_t*)B)[j*ldb + s]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int val += (2 * count - 32) * mean_val; } C[i*ldc + j] += val; } } } // 32 channels -> 1 channel (with 32 floats) // 256 channels -> 8 channels (with 32 floats) void repack_input(float *input, float *re_packed_input, int w, int h, int c) { const int items_per_channel = w * h; int chan, i; for (chan = 0; chan < c; chan += 32) { for (i = 0; i < items_per_channel; ++i) { int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float src = input[(chan + c_pack)*items_per_channel + i]; re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src; } } } } // transpose uint32_t matrix void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align) { //l.bit_align - algined (n) by 32 //new_ldb - aligned (k) by 256 int i; //#pragma omp parallel for for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c; { int j; for (j = 0; j < src_w; j += 1) // out_h*out_w; { ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j]; } } } // convolution repacked bit matrix (32 channels -> 1 uint32_t) XNOR-net void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr) { int fil; // filter index #pragma omp parallel for for (fil = 0; fil < n; ++fil) { float mean_val = mean_arr[fil]; int chan, c_pack, y, x, f_y, f_x; // channel index for (chan = 0; chan < c / 32; ++chan) //for (chan = 0; chan < l.c; chan += 32) //for (c_pack = 0; c_pack < 32; ++c_pack) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; float sum = 0; // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; // normal //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x]; //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x]; // packed //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //sum += input * weight; //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //uint32_t bit1 = input > 0; //uint32_t bit2 = weight > 0; //uint32_t count = (~(bit1 ^ bit2)) & 1; //float result = (2 * (float)count - 1) * mean_val; //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result); //sum += result; uint32_t input = ((uint32_t *)packed_input)[chan*w*h 
+ input_y*w + input_x]; //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x]; uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x]; uint32_t xnor_result = ~(input ^ weight); int32_t count = popcnt_32(xnor_result); // mandatory Signed int sum += (2 * count - 32) * mean_val; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } // -------------- blas.c -------------- #ifdef AVX #ifdef _WIN64 // Windows #include <intrin.h> #else // Linux #include <x86intrin.h> #endif #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <emmintrin.h> // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561 // https://software.intel.com/sites/landingpage/IntrinsicsGuide void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { float A_PART = ALPHA*A[i*lda + k]; __m256 a256, b256, c256, result256; // AVX a256 = _mm256_set1_ps(A_PART); for (j = 0; j < N - 8; j += 8) { b256 = _mm256_loadu_ps(&B[k*ldb + j]); c256 = _mm256_loadu_ps(&C[i*ldc + j]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //result256 = _mm256_fmadd_ps(a256, b256, c256); result256 = _mm256_mul_ps(a256, b256); result256 = _mm256_add_ps(result256, c256); _mm256_storeu_ps(&C[i*ldc + j], result256); } int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8; for (j = prev_end; j < N; ++j) C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = A[i*lda + s]; __m256i a256 = _mm256_set1_epi32(A_PART); for (j = 0; j < N - 8; j += 8) { __m256i b256 = *((__m256i*)&B[s*ldb + j]); __m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b) __m256i all_1 = _mm256_set1_epi8(255); __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b)) // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a) __m256 count = _mm256_setr_ps( popcnt_32(_mm256_extract_epi32(xnor256, 0)), popcnt_32(_mm256_extract_epi32(xnor256, 1)), popcnt_32(_mm256_extract_epi32(xnor256, 2)), popcnt_32(_mm256_extract_epi32(xnor256, 3)), popcnt_32(_mm256_extract_epi32(xnor256, 4)), popcnt_32(_mm256_extract_epi32(xnor256, 5)), popcnt_32(_mm256_extract_epi32(xnor256, 6)), popcnt_32(_mm256_extract_epi32(xnor256, 7))); __m256 val2 = _mm256_set1_ps(2); count = _mm256_mul_ps(count, val2); // count * 2 __m256 val32 = _mm256_set1_ps(32); count = _mm256_sub_ps(count, val32); // count - 32 __m256 mean256 = _mm256_set1_ps(mean_val); count = _mm256_mul_ps(count, mean256); // count * mean_val __m256 c256 = *((__m256*)&C[i*ldc + j]); count = _mm256_add_ps(count, c256); // c = c + count *((__m256*)&C[i*ldc + j]) = count; } for (; j < N; ++j) // out_h*out_w; { register uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 
_mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } #if defined(_MSC_VER) // Windows static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux static inline float _mm256_extract_float32(__m256 a, const int index) { return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index)); } #endif //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1)// && is_fma_avx()) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); _mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.00); int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 uint16_t *dst_ptr = &((unsigned char*)data_col)[col_index / 8]; *dst_ptr |= (mask << (col_index % 8)); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = height_col - 1; for (w = 0; 
w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i = 0; if (a == LINEAR) { } else if (a == LEAKY) { { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 all256_01 = _mm256_set1_ps(0.1F); for (i = 0; i < n - 8; i += 8) { //x[i] = (x[i]>0) ? x[i] : .1*x[i]; __m256 src256 = _mm256_loadu_ps(&x[i]); __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1 __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult; _mm256_storeu_ps(&x[i], result256); } } for (; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { const int w_offset = -pad / 2; const int h_offset = -pad / 2; int b, k; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { //for (j = 0; j < out_w; ++j) { j = 0; if (stride == 1) { for (j = 0; j < out_w - 8 - (size - 1); j += 8) { int out_index = j + out_w*(i + out_h*(k + c*b)); __m256 max256 = _mm256_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); max256 = _mm256_max_ps(src256, max256); } } _mm256_storeu_ps(&dst[out_index], max256); } } else if (size == 2 && stride == 2) { for (j = 0; j < out_w - 4; j += 4) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; __m128 max128 = _mm_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { //for (m = 0; m < size; ++m) m = 0; { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4)); __m256 max256 = _mm256_max_ps(src256, src256_2); __m128 src128_0 = _mm256_extractf128_ps(max256, 0); __m128 src128_1 = _mm256_extractf128_ps(max256, 1); __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6)); max128 = _mm_max_ps(src128, max128); } } _mm_storeu_ps(&dst[out_index], max128); } } for (; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; 
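                    // note: this scalar tail is the only path that fills
                    // indexes[] -- the vectorized stride==1 and 2x2 branches
                    // above write dst but leave the argmax indexes unset,
                    // which only matters for a backward pass that this
                    // trimmed-down inference build does not run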
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}

// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#ifdef _MSC_VER
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}

static inline int popcnt256(__m256i n) {
    return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}

static inline __m256i count256(__m256i v) {
    __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
        2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
        1, 2, 2, 3, 2, 3, 3, 4);
    __m256i low_mask = _mm256_set1_epi8(0x0f);

    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);

    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}

static inline int popcnt256_custom(__m256i n) {
    __m256i val = count256(n);

    //return val.m256i_i64[0] +
    //val.m256i_i64[1] +
    //val.m256i_i64[2] +
    //val.m256i_i64[3];
    return _mm256_extract_epi64(val, 0)
        + _mm256_extract_epi64(val, 1)
        + _mm256_extract_epi64(val, 2)
        + _mm256_extract_epi64(val, 3);
}

// 5x faster than gemm()-float32
// further optimizations: do mean-mult only for the last layer
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads(max_num_threads / 2);
    }
#endif

    int i;
#pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        float mean_val = mean_arr[i];
        int j, k;
        __m256i all_1 = _mm256_set1_epi8(255);

        for (j = 0; j < N; ++j) {   // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256 = _mm256_loadu_si256((__m256i *)(A + (i*lda + k) / 8));
                __m256i b_bit256 = _mm256_loadu_si256((__m256i *)(B + (j*ldb + k) / 8));
                __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256);  // xor(a,b)
                __m256i c_bit256 = _mm256_andnot_si256(xor256, all_1);  // xnor = not(xor(a,b)); can be optimized - we could NOT the weights once instead of doing this NOT here

                count_sum = _mm256_add_epi64(count256(c_bit256), count_sum);    // Mula's algorithm

                //count += popcnt256(c_bit256);

                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            // count of 1 bits
            //count = count_sum.m256i_i64[0] +
            //    count_sum.m256i_i64[1] +
            //    count_sum.m256i_i64[2] +
            //    count_sum.m256i_i64[3];
            count = _mm256_extract_epi64(count_sum, 0)
                + _mm256_extract_epi64(count_sum, 1) +
_mm256_extract_epi64(count_sum, 2) + _mm256_extract_epi64(count_sum, 3); int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val; } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.0); for (i = 0; i < size; i += 8) { //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&src[i])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 dst[i / 8] = mask; } } #else // AVX void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = A[i*lda + s]; for (j = 0; j < N; ++j) // out_h*out_w; { register uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 1) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void 
activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR) {}
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch)
{
    int b, k;
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                for (j = 0; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}

static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32    // Windows
#ifdef _WIN64   // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else           // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else           // Linux
#ifdef __x86_64__   // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else               // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);   // count the high half too
#endif
#endif
    return tmp_count;
}

void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i, j, k;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) {   // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));    // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);
    size_t i;

    // padded so the 8-wide packing loop below cannot read past the end
    char *byte_arr = calloc(size + 8, sizeof(char));
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) byte_arr[i] = 1;
    }

    //for (i = 0; i < size; ++i) {
    //    dst[i / 8] |= byte_arr[i] << (i % 8);
    //}

    for (i = 0; i < size; i += 8) {
        char dst_tmp = 0;
        dst_tmp |= byte_arr[i + 0] << 0;
        dst_tmp |= byte_arr[i + 1] << 1;
        dst_tmp |= byte_arr[i + 2] << 2;
        dst_tmp |= byte_arr[i + 3] << 3;
        dst_tmp |= byte_arr[i + 4] << 4;
        dst_tmp |= byte_arr[i + 5] << 5;
        dst_tmp |= byte_arr[i + 6] << 6;
        dst_tmp |= byte_arr[i + 7] << 7;
        dst[i / 8] = dst_tmp;
    }
    free(byte_arr);
}

#endif  // AVX

/*
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
*/

void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for (i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

// --------------
utils.c -------------- // utils.c void error(const char *s) { perror(s); assert(0); exit(-1); } // utils.c void malloc_error() { fprintf(stderr, "Malloc error\n"); exit(-1); } // utils.c void file_error(char *s) { fprintf(stderr, "Couldn't open file: %s\n", s); exit(0); } // utils.c char *fgetl(FILE *fp) { if (feof(fp)) return 0; size_t size = 512; char *line = malloc(size * sizeof(char)); if (!fgets(line, size, fp)) { free(line); return 0; } size_t curr = strlen(line); while ((line[curr - 1] != '\n') && !feof(fp)) { if (curr == size - 1) { size *= 2; line = realloc(line, size * sizeof(char)); if (!line) { printf("%ld\n", (int long)size); malloc_error(); } } size_t readsize = size - curr; if (readsize > INT_MAX) readsize = INT_MAX - 1; fgets(&line[curr], readsize, fp); curr = strlen(line); } if (line[curr - 1] == '\n') line[curr - 1] = '\0'; return line; } // utils.c int *read_map(char *filename) { int n = 0; int *map = 0; char *str; FILE *file = fopen(filename, "r"); if (!file) file_error(filename); while ((str = fgetl(file))) { ++n; map = realloc(map, n * sizeof(int)); map[n - 1] = atoi(str); } return map; } // utils.c void del_arg(int argc, char **argv, int index) { int i; for (i = index; i < argc - 1; ++i) argv[i] = argv[i + 1]; argv[i] = 0; } // utils.c int find_arg(int argc, char* argv[], char *arg) { int i; for (i = 0; i < argc; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { del_arg(argc, argv, i); return 1; } } return 0; } // utils.c int find_int_arg(int argc, char **argv, char *arg, int def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = atoi(argv[i + 1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c float find_float_arg(int argc, char **argv, char *arg, float def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = atof(argv[i + 1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c char *find_char_arg(int argc, char **argv, char *arg, char *def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = argv[i + 1]; del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c void strip(char *s) { size_t i; size_t len = strlen(s); size_t offset = 0; for (i = 0; i < len; ++i) { char c = s[i]; if (c == ' ' || c == '\t' || c == '\n' || c == '\r') ++offset; else s[i - offset] = c; } s[len - offset] = '\0'; } // utils.c void list_insert(list *l, void *val) { node *new = malloc(sizeof(node)); new->val = val; new->next = 0; if (!l->back) { l->front = new; new->prev = 0; } else { l->back->next = new; new->prev = l->back; } l->back = new; ++l->size; } // utils.c float rand_uniform(float min, float max) { if (max < min) { float swap = min; min = max; max = swap; } return ((float)rand() / RAND_MAX * (max - min)) + min; } // utils.c float rand_scale(float s) { float scale = rand_uniform(1, s); if (rand() % 2) return scale; return 1. 
/ scale; } // utils.c int rand_int(int min, int max) { if (max < min) { int s = min; min = max; max = s; } int r = (rand() % (max - min + 1)) + min; return r; } // utils.c int constrain_int(int a, int min, int max) { if (a < min) return min; if (a > max) return max; return a; } // utils.c float dist_array(float *a, float *b, int n, int sub) { int i; float sum = 0; for (i = 0; i < n; i += sub) sum += powf(a[i] - b[i], 2); return sqrt(sum); } // utils.c float mag_array(float *a, int n) { int i; float sum = 0; for (i = 0; i < n; ++i) { sum += a[i] * a[i]; } return sqrt(sum); } // utils.c int max_index(float *a, int n) { if (n <= 0) return -1; int i, max_i = 0; float max = a[0]; for (i = 1; i < n; ++i) { if (a[i] > max) { max = a[i]; max_i = i; } } return max_i; } // utils.c // From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform float rand_normal() { static int haveSpare = 0; static double rand1, rand2; if (haveSpare) { haveSpare = 0; return sqrt(rand1) * sin(rand2); } haveSpare = 1; rand1 = rand() / ((double)RAND_MAX); if (rand1 < 1e-100) rand1 = 1e-100; rand1 = -2 * log(rand1); rand2 = (rand() / ((double)RAND_MAX)) * TWO_PI; return sqrt(rand1) * cos(rand2); } // utils.c void free_ptrs(void **ptrs, int n) { int i; for (i = 0; i < n; ++i) free(ptrs[i]); free(ptrs); } // -------------- tree.c -------------- // tree.c void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves) { int j; for (j = 0; j < n; ++j) { int parent = hier->parent[j]; if (parent >= 0) { predictions[j] *= predictions[parent]; } } if (only_leaves) { for (j = 0; j < n; ++j) { if (!hier->leaf[j]) predictions[j] = 0; } } } // tree.c tree *read_tree(char *filename) { tree t = { 0 }; FILE *fp = fopen(filename, "r"); char *line; int last_parent = -1; int group_size = 0; int groups = 0; int n = 0; while ((line = fgetl(fp)) != 0) { char *id = calloc(256, sizeof(char)); int parent = -1; sscanf(line, "%s %d", id, &parent); t.parent = realloc(t.parent, (n + 1) * sizeof(int)); t.parent[n] = parent; t.name = realloc(t.name, (n + 1) * sizeof(char *)); t.name[n] = id; if (parent != last_parent) { ++groups; t.group_offset = realloc(t.group_offset, groups * sizeof(int)); t.group_offset[groups - 1] = n - group_size; t.group_size = realloc(t.group_size, groups * sizeof(int)); t.group_size[groups - 1] = group_size; group_size = 0; last_parent = parent; } t.group = realloc(t.group, (n + 1) * sizeof(int)); t.group[n] = groups; ++n; ++group_size; } ++groups; t.group_offset = realloc(t.group_offset, groups * sizeof(int)); t.group_offset[groups - 1] = n - group_size; t.group_size = realloc(t.group_size, groups * sizeof(int)); t.group_size[groups - 1] = group_size; t.n = n; t.groups = groups; t.leaf = calloc(n, sizeof(int)); int i; for (i = 0; i < n; ++i) t.leaf[i] = 1; for (i = 0; i < n; ++i) if (t.parent[i] >= 0) t.leaf[t.parent[i]] = 0; fclose(fp); tree *tree_ptr = calloc(1, sizeof(tree)); *tree_ptr = t; //error(0); return tree_ptr; } // -------------- list.c -------------- // list.c list *make_list() { list *l = malloc(sizeof(list)); l->size = 0; l->front = 0; l->back = 0; return l; } // list.c list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if (!file) file_error(filename); list *lines = make_list(); while ((path = fgetl(file))) { list_insert(lines, path); } fclose(file); return lines; } // list.c void **list_to_array(list *l) { void **a = calloc(l->size, sizeof(void*)); int count = 0; node *n = l->front; while (n) { a[count++] = n->val; n = n->next; } return a; } // list.c void 
free_node(node *n) { node *next; while (n) { next = n->next; free(n); n = next; } } // list.c void free_list(list *l) { free_node(l->front); free(l); } // list.c char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } // -------------- network.c -------------- // network.c float *get_network_output(network net) { #ifdef GPU if (gpu_index >= 0) return get_network_output_gpu(net); #endif int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].output; } // network.c int get_network_output_size(network net) { int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].outputs; } // network.c network make_network(int n) { network net = { 0 }; net.n = n; net.layers = calloc(net.n, sizeof(layer)); net.seen = calloc(1, sizeof(uint64_t)); #ifdef GPU net.input_gpu = calloc(1, sizeof(float *)); net.truth_gpu = calloc(1, sizeof(float *)); #endif return net; } void free_network(network net) { int i; for (i = 0; i < net.n; ++i) { free_layer(net.layers[i]); } free(net.layers); free(net.scales); free(net.steps); free(net.seen); #ifdef GPU if (gpu_index >= 0) cuda_free(net.workspace); else free(net.workspace); if (net.input_state_gpu) cuda_free(net.input_state_gpu); if (net.input_pinned_cpu) { // CPU if (net.input_pinned_cpu_flag) cudaFreeHost(net.input_pinned_cpu); else free(net.input_pinned_cpu); } if (*net.input_gpu) cuda_free(*net.input_gpu); if (*net.truth_gpu) cuda_free(*net.truth_gpu); if (net.input_gpu) free(net.input_gpu); if (net.truth_gpu) free(net.truth_gpu); //if (*net.input16_gpu) cuda_free(*net.input16_gpu); //if (*net.output16_gpu) cuda_free(*net.output16_gpu); //if (net.input16_gpu) free(net.input16_gpu); //if (net.output16_gpu) free(net.output16_gpu); //if (net.max_input16_size) free(net.max_input16_size); //if (net.max_output16_size) free(net.max_output16_size); #else free(net.workspace); #endif } // network.c #ifdef GPU #ifdef CUDNN void cudnn_convolutional_setup(layer *l) { #if(CUDNN_MAJOR >= 7) cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH); #if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) // cuDNN >= 7.2 cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION); #endif //(CUDNN_MAJOR >= 7.2) #endif //(CUDNN_MAJOR >= 7) if (l->quantized) { cudnnDataType_t cudnn_data_type = CUDNN_DATA_INT8x4; cudnnTensorFormat_t tensor_format = CUDNN_TENSOR_NCHW_VECT_C; cudnnTensorFormat_t dst_tensor_format = CUDNN_TENSOR_NCHW; #if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) //if (l->c % 32 == 0) cudnn_data_type = CUDNN_DATA_INT8x32; // Tensor Cores for INT8 #endif //(CUDNN_MAJOR >= 7.2) cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW_VECT_C, cudnn_data_type, l->batch, l->c, l->h, l->w); cudnnSetFilter4dDescriptor(l->weightDesc, cudnn_data_type, CUDNN_TENSOR_NCHW_VECT_C, l->n, l->c, l->size, l->size); cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w); cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_INT32); // cudnn 7 l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; // only supported for DP4A INT8x4 // BIAS float cudnnSetTensor4dDescriptor(l->biasTensorDesc, dst_tensor_format, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1); // https://en.wikipedia.org/wiki/Activation_function // CUDNN_ACTIVATION_IDENTITY 
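        // IDENTITY keeps the fused INT8 conv+bias descriptor free of any
        // built-in nonlinearity; cuDNN offers no leaky-ReLU activation mode,
        // so LEAKY has to be applied outside this fused path (see the note
        // below about RELU/ELU not replacing LEAKY_RELU)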
cudnnSetActivationDescriptor(l->activationDesc, CUDNN_ACTIVATION_IDENTITY, CUDNN_NOT_PROPAGATE_NAN, 0); //cudnnSetActivationDescriptor(activationDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.1); // RELU or ELU can't replace LEAKY_RELU } else { cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w); cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w); cudnnSetFilter4dDescriptor(l->weightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size); #if(CUDNN_MAJOR >= 6) cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); // cudnn 6.0 #else cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION); // cudnn 5.1 #endif cudnnGetConvolutionForwardAlgorithm(cudnn_handle(), l->srcTensorDesc, l->weightDesc, l->convDesc, l->dstTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &l->fw_algo); //l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; // un-comment to use Tensor Cores for cuDNN >= 7.2 } } #endif #endif // network.c void set_batch_network(network *net, int b) { net->batch = b; int i; for (i = 0; i < net->n; ++i) { layer l = net->layers[i]; l.batch = b; #ifdef CUDNN if (l.type == CONVOLUTIONAL) { cudnn_convolutional_setup(net->layers + i); } #endif } } // -------------- layer.c -------------- void free_layer(layer l) { if (l.type == DROPOUT) { if (l.rand) free(l.rand); #ifdef GPU if (l.rand_gpu) cuda_free(l.rand_gpu); #endif return; } if (l.cweights) free(l.cweights); if (l.indexes) free(l.indexes); if (l.input_layers) free(l.input_layers); if (l.input_sizes) free(l.input_sizes); if (l.map) free(l.map); if (l.rand) free(l.rand); if (l.cost) free(l.cost); if (l.state) free(l.state); if (l.prev_state) free(l.prev_state); if (l.forgot_state) free(l.forgot_state); if (l.forgot_delta) free(l.forgot_delta); if (l.state_delta) free(l.state_delta); if (l.concat) free(l.concat); if (l.concat_delta) free(l.concat_delta); if (l.binary_weights) free(l.binary_weights); if (l.biases) free(l.biases); if (l.biases_quant) free(l.biases_quant); //if (l.bias_updates) free(l.bias_updates); if (l.scales) free(l.scales); //if (l.scale_updates) free(l.scale_updates); if (l.weights) free(l.weights); if (l.weights_int8) free(l.weights_int8); if (l.align_bit_weights) free(l.align_bit_weights); if (l.mean_arr) free(l.mean_arr); //if (l.weight_updates) free(l.weight_updates); //if (l.delta) free(l.delta); #ifdef GPU if (l.output && l.output_pinned) { cudaFreeHost(l.output); l.output = NULL; } #endif if (l.output) free(l.output); if (l.output_int8) free(l.output_int8); if (l.squared) free(l.squared); if (l.norms) free(l.norms); if (l.spatial_mean) free(l.spatial_mean); if (l.mean) free(l.mean); if (l.variance) free(l.variance); //if (l.mean_delta) free(l.mean_delta); //if (l.variance_delta) free(l.variance_delta); if (l.rolling_mean) free(l.rolling_mean); if (l.rolling_variance) free(l.rolling_variance); if (l.x) free(l.x); if (l.x_norm) free(l.x_norm); if (l.m) free(l.m); if (l.v) free(l.v); if (l.z_cpu) free(l.z_cpu); if (l.r_cpu) free(l.r_cpu); if (l.h_cpu) free(l.h_cpu); if (l.binary_input) free(l.binary_input); if (l.mask) free(l.mask); #ifdef GPU if (l.indexes_gpu) cuda_free((float *)l.indexes_gpu); if (l.z_gpu) cuda_free(l.z_gpu); if (l.r_gpu) cuda_free(l.r_gpu); if (l.h_gpu) cuda_free(l.h_gpu); if (l.m_gpu) 
cuda_free(l.m_gpu); if (l.v_gpu) cuda_free(l.v_gpu); if (l.prev_state_gpu) cuda_free(l.prev_state_gpu); if (l.forgot_state_gpu) cuda_free(l.forgot_state_gpu); if (l.forgot_delta_gpu) cuda_free(l.forgot_delta_gpu); if (l.state_gpu) cuda_free(l.state_gpu); if (l.state_delta_gpu) cuda_free(l.state_delta_gpu); if (l.gate_gpu) cuda_free(l.gate_gpu); if (l.gate_delta_gpu) cuda_free(l.gate_delta_gpu); if (l.save_gpu) cuda_free(l.save_gpu); if (l.save_delta_gpu) cuda_free(l.save_delta_gpu); if (l.concat_gpu) cuda_free(l.concat_gpu); if (l.concat_delta_gpu) cuda_free(l.concat_delta_gpu); if (l.binary_input_gpu) cuda_free(l.binary_input_gpu); if (l.binary_weights_gpu) cuda_free(l.binary_weights_gpu); if (l.mean_gpu) cuda_free(l.mean_gpu); if (l.variance_gpu) cuda_free(l.variance_gpu); if (l.rolling_mean_gpu) cuda_free(l.rolling_mean_gpu); if (l.rolling_variance_gpu) cuda_free(l.rolling_variance_gpu); if (l.variance_delta_gpu) cuda_free(l.variance_delta_gpu); if (l.mean_delta_gpu) cuda_free(l.mean_delta_gpu); if (l.x_gpu) cuda_free(l.x_gpu); if (l.x_norm_gpu) cuda_free(l.x_norm_gpu); if (l.align_bit_weights_gpu) cuda_free(l.align_bit_weights_gpu); if (l.mean_arr_gpu) cuda_free(l.mean_arr_gpu); if (l.align_workspace_gpu) cuda_free(l.align_workspace_gpu); if (l.transposed_align_workspace_gpu) cuda_free(l.transposed_align_workspace_gpu); if (l.weights_gpu) cuda_free(l.weights_gpu); //if (l.weight_updates_gpu) cuda_free(l.weight_updates_gpu); if (l.biases_gpu) cuda_free(l.biases_gpu); //if (l.bias_updates_gpu) cuda_free(l.bias_updates_gpu); if (l.scales_gpu) cuda_free(l.scales_gpu); //if (l.scale_updates_gpu) cuda_free(l.scale_updates_gpu); if (l.output_gpu) cuda_free(l.output_gpu); if (l.output_gpu_int8) cuda_free(l.output_gpu_int8); if (l.delta_gpu) cuda_free(l.delta_gpu); if (l.rand_gpu) cuda_free(l.rand_gpu); if (l.squared_gpu) cuda_free(l.squared_gpu); if (l.norms_gpu) cuda_free(l.norms_gpu); #endif #ifdef CUDNN cudnnDestroyTensorDescriptor(l.biasTensorDesc); cudnnDestroyActivationDescriptor(l.activationDesc); cudnnDestroyTensorDescriptor(l.srcTensorDesc); cudnnDestroyTensorDescriptor(l.dstTensorDesc); cudnnDestroyFilterDescriptor(l.weightDesc); cudnnDestroyConvolutionDescriptor(l.convDesc); cudnnDestroyPoolingDescriptor(l.poolingDesc); #endif } // -------------- softmax_layer.c -------------- // softmax_layer.c softmax_layer make_softmax_layer(int batch, int inputs, int groups) { assert(inputs%groups == 0); fprintf(stderr, "softmax %4d\n", inputs); softmax_layer l = { 0 }; l.type = SOFTMAX; l.batch = batch; l.groups = groups; l.inputs = inputs; l.outputs = inputs; l.output = calloc(inputs*batch, sizeof(float)); //l.delta = calloc(inputs*batch, sizeof(float)); // commented only for this custom version of Yolo v2 //l.forward = forward_softmax_layer; //l.backward = backward_softmax_layer; #ifdef GPU // commented only for this custom version of Yolo v2 //l.forward_gpu = forward_softmax_layer_gpu; //l.backward_gpu = backward_softmax_layer_gpu; l.output_gpu = cuda_make_array(l.output, inputs*batch); cudaError_t status; status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*inputs*batch); //l.delta_gpu = cuda_make_array(l.delta, inputs*batch); #endif #ifdef OPENCL l.output_ocl = ocl_make_array(l.output, inputs*batch); #endif return l; } // -------------- upsample_layer.c -------------- // upsample_layer.c layer make_upsample_layer(int batch, int w, int h, int c, int stride) { layer l = { 0 }; l.type = UPSAMPLE; l.batch = batch; l.w = w; l.h = h; l.c = c; l.out_w = w*stride; l.out_h = h*stride; 
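    /* A negative stride selects the reverse direction: the branch below flips
     * the layer into a downsample, so stride = -2 gives out_w = w / 2 and
     * out_h = h / 2 instead of w * 2 and h * 2. */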
l.out_c = c; if (stride < 0) { stride = -stride; l.reverse = 1; l.out_w = w / stride; l.out_h = h / stride; } l.stride = stride; l.outputs = l.out_w*l.out_h*l.out_c; l.inputs = l.w*l.h*l.c; //l.delta = calloc(l.outputs*batch, sizeof(float)); l.output = calloc(l.outputs*batch, sizeof(float));; //l.forward = forward_upsample_layer; #ifdef GPU //l.forward_gpu = forward_upsample_layer_gpu; l.output_gpu = cuda_make_array(l.output, l.outputs*batch); #endif if (l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); return l; } // -------------- shortcut_layer.c -------------- // shortcut.c layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2) { fprintf(stderr, "Shortcut Layer: %d\n", index); layer l = { 0 }; l.type = SHORTCUT; l.batch = batch; l.w = w2; l.h = h2; l.c = c2; l.out_w = w; l.out_h = h; l.out_c = c; l.outputs = w*h*c; l.inputs = l.outputs; l.index = index; l.output = calloc(l.outputs*batch, sizeof(float));; #ifdef GPU l.output_gpu = cuda_make_array(l.output, l.outputs*batch); #endif return l; } // -------------- reorg_layer.c -------------- // reorg_layer.c layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse) { layer l = { 0 }; l.type = REORG; l.batch = batch; l.stride = stride; l.h = h; l.w = w; l.c = c; if (reverse) { l.out_w = w*stride; l.out_h = h*stride; l.out_c = c / (stride*stride); } else { l.out_w = w / stride; l.out_h = h / stride; l.out_c = c*(stride*stride); } l.reverse = reverse; fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = h*w*c; int output_size = l.out_h * l.out_w * l.out_c * batch; l.output = calloc(output_size, sizeof(float)); l.output_int8 = calloc(output_size, sizeof(int8_t)); //l.delta = calloc(output_size, sizeof(float)); // commented only for this custom version of Yolo v2 //l.forward = forward_reorg_layer; //l.backward = backward_reorg_layer; #ifdef GPU // commented only for this custom version of Yolo v2 //l.forward_gpu = forward_reorg_layer_gpu; //l.backward_gpu = backward_reorg_layer_gpu; l.output_gpu = cuda_make_array(l.output, output_size); cudaError_t status; status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*output_size); //l.delta_gpu = cuda_make_array(l.delta, output_size); #endif #ifdef OPENCL l.output_ocl = ocl_make_array(l.output, output_size); #endif return l; } // -------------- route_layer.c -------------- // route_layer.c route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes) { fprintf(stderr, "route "); route_layer l = { 0 }; l.type = ROUTE; l.batch = batch; l.n = n; l.input_layers = input_layers; l.input_sizes = input_sizes; int i; int outputs = 0; for (i = 0; i < n; ++i) { fprintf(stderr, " %d", input_layers[i]); outputs += input_sizes[i]; } fprintf(stderr, "\n"); l.outputs = outputs; l.inputs = outputs; //l.delta = calloc(outputs*batch, sizeof(float)); l.output = calloc(outputs*batch, sizeof(float)); l.output_int8 = calloc(outputs*batch, sizeof(int8_t)); // commented only for this custom version of Yolo v2 //l.forward = forward_route_layer; //l.backward = backward_route_layer; #ifdef GPU // commented only for this custom version of Yolo v2 //l.forward_gpu = forward_route_layer_gpu; //l.backward_gpu = backward_route_layer_gpu; //l.delta_gpu 
    //    = cuda_make_array(l.delta, outputs*batch);
    l.output_gpu = cuda_make_array(l.output, outputs*batch);
    cudaError_t status;
    status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*outputs*batch);
#endif
#ifdef OPENCL
    l.output_ocl = ocl_make_array(l.output, outputs*batch);
#endif
    return l;
}

// -------------- yolo_layer.c --------------

layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes, int max_boxes)
{
    int i;
    layer l = { 0 };
    l.type = YOLO;

    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + 4 + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(total * 2, sizeof(float));
    if (mask) l.mask = mask;
    else {
        l.mask = calloc(n, sizeof(int));
        for (i = 0; i < n; ++i) {
            l.mask[i] = i;
        }
    }
    //l.bias_updates = calloc(n * 2, sizeof(float));
    l.outputs = h*w*n*(classes + 4 + 1);
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truths = l.max_boxes*(4 + 1);    // 90*(4 + 1);
    //l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    for (i = 0; i < total * 2; ++i) {
        l.biases[i] = .5;
    }
#ifdef GPU
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    free(l.output);
    if (cudaSuccess == cudaHostAlloc(&l.output, batch*l.outputs * sizeof(float), cudaHostRegisterMapped)) l.output_pinned = 1;
    else {
        cudaGetLastError(); // reset CUDA-error
        l.output = calloc(batch*l.outputs, sizeof(float));
    }
#endif
    fprintf(stderr, "yolo\n");
    srand(0);
    return l;
}

// -------------- region_layer.c --------------

// region_layer.c
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    region_layer l = { 0 };
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(n * 2, sizeof(float));
    //l.bias_updates = calloc(n * 2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30 * (5);
    //l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    int i;
    for (i = 0; i < n * 2; ++i) {
        l.biases[i] = .5;
    }

    // commented only for this custom version of Yolo v2
    //l.forward = forward_region_layer;
    //l.backward = backward_region_layer;
#ifdef GPU
    // commented only for this custom version of Yolo v2
    //l.forward_gpu = forward_region_layer_gpu;
    //l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    cudaError_t status;
    status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*l.outputs*batch);
    //l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif
#ifdef OPENCL
    l.output_ocl = ocl_make_array(l.output, batch*l.outputs);
#endif
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}

// -------------- maxpool_layer.c --------------

// maxpool_layer.c
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    maxpool_layer l = { 0 };
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + padding - size) / stride + 1;
    l.out_h = (h + padding - size) / stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    int output_size = l.out_h * l.out_w * l.out_c * batch;
    l.indexes = calloc(output_size, sizeof(int));
    l.output = calloc(output_size, sizeof(float));
    l.output_int8 = calloc(output_size, sizeof(int8_t));
    //l.delta = calloc(output_size, sizeof(float));
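    /* out_w/out_h above follow (w + padding - size) / stride + 1 (integer
     * division); e.g. a size=2, stride=1, padding=1 maxpool maps a 13x13
     * input to (13 + 1 - 2) / 1 + 1 = 13, i.e. it preserves the grid size. */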
    // commented only for this custom version of Yolo v2
    //l.forward = forward_maxpool_layer;
    //l.backward = backward_maxpool_layer;
#ifdef GPU
    // commented only for this custom version of Yolo v2
    //l.forward_gpu = forward_maxpool_layer_gpu;
    //l.backward_gpu = backward_maxpool_layer_gpu;
    l.indexes_gpu = cuda_make_int_array(output_size);
    l.output_gpu = cuda_make_array(l.output, output_size);
    cudaError_t status;
    status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*output_size);
    //l.delta_gpu = cuda_make_array(l.delta, output_size);

    cudnnStatus_t maxpool_status;
    maxpool_status = cudnnCreatePoolingDescriptor(&l.poolingDesc);
    maxpool_status = cudnnSetPooling2dDescriptor(
        l.poolingDesc,
        CUDNN_POOLING_MAX,
        CUDNN_PROPAGATE_NAN,    // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
        l.size,
        l.size,
        0, //l.pad,
        0, //l.pad,
        l.stride,
        l.stride);

    cudnnCreateTensorDescriptor(&l.srcTensorDesc);
    cudnnCreateTensorDescriptor(&l.dstTensorDesc);
    cudnnSetTensor4dDescriptor(l.srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.c, l.h, l.w);
    cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
#endif
#ifdef OPENCL
    l.indexes_ocl = ocl_make_int_array(output_size);
    l.output_ocl = ocl_make_array(l.output, output_size);
#endif
    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

// -------------- convolutional_layer.c --------------

// convolutional_layer.c
size_t get_workspace_size(layer l) {
#ifdef CUDNN
    if (gpu_index >= 0) {
        size_t most = 0;
        size_t s = 0;
        cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
            l.srcTensorDesc,
            l.weightDesc,
            l.convDesc,
            l.dstTensorDesc,
            l.fw_algo,
            &s);
        /*
        if (s > most) most = s;
        cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),
            l.srcTensorDesc,
            l.ddstTensorDesc,
            l.convDesc,
            l.dweightDesc,
            l.bf_algo,
            &s);
        if (s > most) most = s;
        cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),
            l.weightDesc,
            l.ddstTensorDesc,
            l.convDesc,
            l.dsrcTensorDesc,
            l.bd_algo,
            &s);
        */
        if (s > most) most = s;
        return most;
    }
#endif
    if (l.xnor) {
        size_t re_packed_input_size = l.c * l.w * l.h * sizeof(float);
        size_t workspace_size = (size_t)l.bit_align*l.size*l.size*l.c * sizeof(float);
        if (workspace_size < re_packed_input_size) workspace_size = re_packed_input_size;
        return workspace_size;
    }
    return (size_t)l.out_h*l.out_w*l.size*l.size*l.c * sizeof(float);
}

int convolutional_out_height(convolutional_layer l)
{
    return (l.h + 2 * l.pad - l.size) / l.stride + 1;
}

int convolutional_out_width(convolutional_layer l)
{
    return (l.w + 2 * l.pad - l.size) / l.stride + 1;
}

// convolutional_layer.c
convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding,
    ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam, int quantized, int use_bin_output)
{
    int i;
    convolutional_layer l = { 0 };
    l.type = CONVOLUTIONAL;
    l.quantized = quantized;

    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.binary = binary;
    l.xnor = xnor;
    l.use_bin_output = use_bin_output;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;

    l.weights = calloc(c*n*size*size, sizeof(float));
    l.weights_int8 = calloc(c*n*size*size, sizeof(int8_t));
    //l.weight_updates = calloc(c*n*size*size, sizeof(float));

    l.biases = calloc(n, sizeof(float));
    l.biases_quant = calloc(n, sizeof(float));
    //l.bias_updates = calloc(n, sizeof(float));

    // float scale = 1./sqrt(size*size*c);
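    /* He-style initialization: scale = sqrt(2 / fan_in) with
     * fan_in = size*size*c, and each weight below is drawn uniformly from
     * [-scale, scale]; e.g. a 3x3 kernel over 64 input channels gives
     * sqrt(2 / 576) ~= 0.059. */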
float scale = sqrt(2. / (size*size*c)); for (i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1); int out_h = convolutional_out_height(l); int out_w = convolutional_out_width(l); l.out_h = out_h; l.out_w = out_w; l.out_c = n; l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = l.w * l.h * l.c; l.output = calloc(l.batch*l.outputs, sizeof(float)); l.output_int8 = calloc(l.batch*l.outputs, sizeof(int8_t)); //l.delta = calloc(l.batch*l.outputs, sizeof(float)); // commented only for this custom version of Yolo v2 ///l.forward = forward_convolutional_layer; ///l.backward = backward_convolutional_layer; ///l.update = update_convolutional_layer; if (binary) { l.binary_weights = calloc(c*n*size*size, sizeof(float)); l.cweights = calloc(c*n*size*size, sizeof(char)); l.scales = calloc(n, sizeof(float)); } if (xnor) { l.binary_weights = calloc(c*n*size*size, sizeof(float)); l.binary_input = calloc(l.inputs*l.batch, sizeof(float)); int align = 32;// 8; int src_align = l.out_h*l.out_w; l.bit_align = src_align + (align - src_align % align); l.mean_arr = calloc(l.n, sizeof(float)); const size_t new_c = l.c / 32; size_t in_re_packed_input_size = new_c * l.w * l.h + 1; l.bin_re_packed_input = calloc(in_re_packed_input_size, sizeof(uint32_t)); l.lda_align = 256; // AVX2 int k = l.size*l.size*l.c; size_t k_aligned = k + (l.lda_align - k%l.lda_align); size_t t_bit_input_size = k_aligned * l.bit_align / 8; l.t_bit_input = calloc(t_bit_input_size, sizeof(char)); } if (batch_normalize) { l.scales = calloc(n, sizeof(float)); //l.scale_updates = calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { l.scales[i] = 1; } l.mean = calloc(n, sizeof(float)); l.variance = calloc(n, sizeof(float)); //l.mean_delta = calloc(n, sizeof(float)); //l.variance_delta = calloc(n, sizeof(float)); l.rolling_mean = calloc(n, sizeof(float)); l.rolling_variance = calloc(n, sizeof(float)); l.x = calloc(l.batch*l.outputs, sizeof(float)); l.x_norm = calloc(l.batch*l.outputs, sizeof(float)); } if (adam) { l.adam = 1; l.m = calloc(c*n*size*size, sizeof(float)); l.v = calloc(c*n*size*size, sizeof(float)); } #ifdef GPU // commented only for this custom version of Yolo v2 //l.forward_gpu = forward_convolutional_layer_gpu; //l.backward_gpu = backward_convolutional_layer_gpu; //l.update_gpu = update_convolutional_layer_gpu; if (gpu_index >= 0) { //if (adam) { // l.m_gpu = cuda_make_array(l.m, c*n*size*size); // l.v_gpu = cuda_make_array(l.v, c*n*size*size); //} l.weights_gpu = cuda_make_array(l.weights, c*n*size*size); //l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size); l.biases_gpu = cuda_make_array(l.biases, n); //l.bias_updates_gpu = cuda_make_array(l.bias_updates, n); //l.delta_gpu = cuda_make_array(l.delta, l.batch*out_h*out_w*n); l.output_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n); cudaError_t status; status = cudaMalloc((void **)&(l.output_gpu_int8), sizeof(int8_t)*l.batch*out_h*out_w*n); //if (binary) { // l.binary_weights_gpu = cuda_make_array(l.weights, c*n*size*size); //} if (xnor) { l.binary_weights_gpu = cuda_make_array(l.weights, c*n*size*size); l.mean_arr_gpu = cuda_make_array(0, l.n); l.binary_input_gpu = cuda_make_array(0, l.inputs*l.batch); } if (batch_normalize) { //l.mean_gpu = cuda_make_array(l.mean, n); //l.variance_gpu = cuda_make_array(l.variance, n); l.rolling_mean_gpu = cuda_make_array(l.mean, n); l.rolling_variance_gpu = cuda_make_array(l.variance, n); //l.mean_delta_gpu = cuda_make_array(l.mean, n); //l.variance_delta_gpu = cuda_make_array(l.variance, n); l.scales_gpu 
= cuda_make_array(l.scales, n); //l.scale_updates_gpu = cuda_make_array(l.scale_updates, n); l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n); //l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n); } #ifdef CUDNN cudnnCreateTensorDescriptor(&l.biasTensorDesc); cudnnCreateActivationDescriptor(&l.activationDesc); cudnnCreateTensorDescriptor(&l.srcTensorDesc); cudnnCreateTensorDescriptor(&l.dstTensorDesc); cudnnCreateFilterDescriptor(&l.weightDesc); //cudnnCreateTensorDescriptor(&l.dsrcTensorDesc); //cudnnCreateTensorDescriptor(&l.ddstTensorDesc); //cudnnCreateFilterDescriptor(&l.dweightDesc); cudnnCreateConvolutionDescriptor(&l.convDesc); cudnn_convolutional_setup(&l); #endif } #endif #ifdef OPENCL //if (gpu_index >= 0) { l.weights_ocl = ocl_make_array(l.weights, c*n*size*size); l.biases_ocl = ocl_make_array(l.biases, n); l.output_ocl = ocl_make_array(l.output, l.batch*out_h*out_w*n); if (batch_normalize) { l.rolling_mean_ocl = ocl_make_array(l.rolling_mean, n); // l.mean l.rolling_variance_ocl = ocl_make_array(l.rolling_variance, n); // l.variance l.scales_ocl = ocl_make_array(l.scales, n); l.x_ocl = ocl_make_array(l.output, l.batch*out_h*out_w*n); } //} #endif l.workspace_size = get_workspace_size(l); l.activation = activation; l.bflops = (2.0 * l.n * l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.; if (l.xnor && l.use_bin_output) fprintf(stderr, "convXB"); else if (l.xnor) fprintf(stderr, "convX "); else fprintf(stderr, "conv "); fprintf(stderr, "%5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops); return l; } // -------------- image.c -------------- // image.c void rgbgr_image(image im) { int i; for (i = 0; i < im.w*im.h; ++i) { float swap = im.data[i]; im.data[i] = im.data[i + im.w*im.h * 2]; im.data[i + im.w*im.h * 2] = swap; } } // image.c image make_empty_image(int w, int h, int c) { image out; out.data = 0; out.h = h; out.w = w; out.c = c; return out; } // image.c void free_image(image m) { if (m.data) { free(m.data); } } // image.c void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b) { //normalize_image(a); int i; if (x1 < 0) x1 = 0; if (x1 >= a.w) x1 = a.w - 1; if (x2 < 0) x2 = 0; if (x2 >= a.w) x2 = a.w - 1; if (y1 < 0) y1 = 0; if (y1 >= a.h) y1 = a.h - 1; if (y2 < 0) y2 = 0; if (y2 >= a.h) y2 = a.h - 1; for (i = x1; i <= x2; ++i) { a.data[i + y1*a.w + 0 * a.w*a.h] = r; a.data[i + y2*a.w + 0 * a.w*a.h] = r; a.data[i + y1*a.w + 1 * a.w*a.h] = g; a.data[i + y2*a.w + 1 * a.w*a.h] = g; a.data[i + y1*a.w + 2 * a.w*a.h] = b; a.data[i + y2*a.w + 2 * a.w*a.h] = b; } for (i = y1; i <= y2; ++i) { a.data[x1 + i*a.w + 0 * a.w*a.h] = r; a.data[x2 + i*a.w + 0 * a.w*a.h] = r; a.data[x1 + i*a.w + 1 * a.w*a.h] = g; a.data[x2 + i*a.w + 1 * a.w*a.h] = g; a.data[x1 + i*a.w + 2 * a.w*a.h] = b; a.data[x2 + i*a.w + 2 * a.w*a.h] = b; } } // image.c void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b) { int i; for (i = 0; i < w; ++i) { draw_box(a, x1 + i, y1 + i, x2 - i, y2 - i, r, g, b); } } // image.c image make_image(int w, int h, int c) { image out = make_empty_image(w, h, c); out.data = calloc(h*w*c, sizeof(float)); return out; } // image.c float get_pixel(image m, int x, int y, int c) { assert(x < m.w && y < m.h && c < m.c); return m.data[c*m.h*m.w + y*m.w + x]; } // image.c void set_pixel(image m, int x, int y, int c, float val) { if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return; assert(x < m.w && y < 
m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] = val; } // image.c void add_pixel(image m, int x, int y, int c, float val) { assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] += val; } // image.c image resize_image(image im, int w, int h) { image resized = make_image(w, h, im.c); image part = make_image(w, im.h, im.c); int r, c, k; float w_scale = (float)(im.w - 1) / (w - 1); float h_scale = (float)(im.h - 1) / (h - 1); for (k = 0; k < im.c; ++k) { for (r = 0; r < im.h; ++r) { for (c = 0; c < w; ++c) { float val = 0; if (c == w - 1 || im.w == 1) { val = get_pixel(im, im.w - 1, r, k); } else { float sx = c*w_scale; int ix = (int)sx; float dx = sx - ix; val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix + 1, r, k); } set_pixel(part, c, r, k, val); } } } for (k = 0; k < im.c; ++k) { for (r = 0; r < h; ++r) { float sy = r*h_scale; int iy = (int)sy; float dy = sy - iy; for (c = 0; c < w; ++c) { float val = (1 - dy) * get_pixel(part, c, iy, k); set_pixel(resized, c, r, k, val); } if (r == h - 1 || im.h == 1) continue; for (c = 0; c < w; ++c) { float val = dy * get_pixel(part, c, iy + 1, k); add_pixel(resized, c, r, k, val); } } } free_image(part); return resized; } // image.c image load_image(char *filename, int w, int h, int c) { #ifdef OPENCV image out = load_image_cv(filename, c); #else image out = load_image_stb(filename, c); #endif if ((h && w) && (h != out.h || w != out.w)) { image resized = resize_image(out, w, h); free_image(out); out = resized; } return out; } // image.c image load_image_stb(char *filename, int channels) { int w, h, c; unsigned char *data = stbi_load(filename, &w, &h, &c, channels); if (!data) { fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason()); exit(0); } if (channels) c = channels; int i, j, k; image im = make_image(w, h, c); for (k = 0; k < c; ++k) { for (j = 0; j < h; ++j) { for (i = 0; i < w; ++i) { int dst_index = i + w*j + w*h*k; int src_index = k + c*i + c*w*j; im.data[dst_index] = (float)data[src_index] / 255.; } } } free(data); return im; } #ifdef OPENCV // image.c image ipl_to_image(IplImage* src) { unsigned char *data = (unsigned char *)src->imageData; int h = src->height; int w = src->width; int c = src->nChannels; int step = src->widthStep; image out = make_image(w, h, c); int i, j, k, count = 0;; for (k = 0; k < c; ++k) { for (i = 0; i < h; ++i) { for (j = 0; j < w; ++j) { out.data[count++] = data[i*step + j*c + k] / 255.; } } } return out; } // image.c image load_image_cv(char *filename, int channels) { IplImage* src = 0; int flag = -1; if (channels == 0) flag = -1; else if (channels == 1) flag = 0; else if (channels == 3) flag = 1; else { fprintf(stderr, "OpenCV can't force load with %d channels\n", channels); } if ((src = cvLoadImage(filename, flag)) == 0) { fprintf(stderr, "Cannot load image \"%s\"\n", filename); char buff[256]; sprintf(buff, "echo %s >> bad.list", filename); system(buff); return make_image(10, 10, 3); //exit(0); } image out = ipl_to_image(src); cvReleaseImage(&src); rgbgr_image(out); return out; } #endif // OPENCV // image.c image copy_image(image p) { image copy = p; copy.data = calloc(p.h*p.w*p.c, sizeof(float)); memcpy(copy.data, p.data, p.h*p.w*p.c * sizeof(float)); return copy; } // image.c void constrain_image(image im) { int i; for (i = 0; i < im.w*im.h*im.c; ++i) { if (im.data[i] < 0) im.data[i] = 0; if (im.data[i] > 1) im.data[i] = 1; } } #ifdef OPENCV // image.c void show_image_cv(image p, const char *name) { int x, y, k; image copy = 
copy_image(p); constrain_image(copy); if (p.c == 3) rgbgr_image(copy); char buff[256]; sprintf(buff, "%s", name); IplImage *disp = cvCreateImage(cvSize(p.w, p.h), IPL_DEPTH_8U, p.c); int step = disp->widthStep; cvNamedWindow(buff, CV_WINDOW_NORMAL); for (y = 0; y < p.h; ++y) { for (x = 0; x < p.w; ++x) { for (k = 0; k < p.c; ++k) { disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(copy, x, y, k) * 255); } } } free_image(copy); cvShowImage(buff, disp); cvReleaseImage(&disp); } // image.c void show_image_cv_ipl(IplImage *disp, const char *name) { if (disp == NULL) return; char buff[256]; sprintf(buff, "%s", name); cvNamedWindow(buff, CV_WINDOW_NORMAL); cvShowImage(buff, disp); } #endif // image.c void save_image_png(image im, const char *name) { char buff[256]; sprintf(buff, "%s.png", name); unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char)); int i, k; for (k = 0; k < im.c; ++k) { for (i = 0; i < im.w*im.h; ++i) { data[i*im.c + k] = (unsigned char)(255 * im.data[i + k*im.w*im.h]); } } int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c); free(data); if (!success) fprintf(stderr, "Failed to write image %s\n", buff); } // image.c void show_image(image p, const char *name) { #ifdef OPENCV show_image_cv(p, name); #else fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name); save_image_png(p, name); #endif } // image.c float get_color(int c, int x, int max) { static float colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } }; float ratio = ((float)x / max) * 5; int i = floor(ratio); int j = ceil(ratio); ratio -= i; float r = (1 - ratio) * colors[i][c] + ratio*colors[j][c]; //printf("%f\n", r); return r; } // -------------- option_list.c -------------------- // option_list.c typedef struct { char *key; char *val; int used; } kvp; // option_list.c void option_insert(list *l, char *key, char *val) { kvp *p = malloc(sizeof(kvp)); p->key = key; p->val = val; p->used = 0; list_insert(l, p); } // option_list.c int read_option(char *s, list *options) { size_t i; size_t len = strlen(s); char *val = 0; for (i = 0; i < len; ++i) { if (s[i] == '=') { s[i] = '\0'; val = s + i + 1; break; } } if (i == len - 1) return 0; char *key = s; option_insert(options, key, val); return 1; } // option_list.c list *read_data_cfg(char *filename) { FILE *file = fopen(filename, "r"); if (file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); while ((line = fgetl(file)) != 0) { ++nu; strip(line); switch (line[0]) { case '\0': case '#': case ';': free(line); break; default: if (!read_option(line, options)) { fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); free(line); } break; } } fclose(file); return options; } // option_list.c void option_unused(list *l) { node *n = l->front; while (n) { kvp *p = (kvp *)n->val; if (!p->used) { fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val); } n = n->next; } } // option_list.c char *option_find(list *l, char *key) { node *n = l->front; while (n) { kvp *p = (kvp *)n->val; if (strcmp(p->key, key) == 0) { p->used = 1; return p->val; } n = n->next; } return 0; } // option_list.c char *option_find_str(list *l, char *key, char *def) { char *v = option_find(l, key); if (v) return v; if (def) fprintf(stderr, "%s: Using default '%s'\n", key, def); return def; } // option_list.c int option_find_int(list *l, char *key, int def) { char *v = option_find(l, key); if (v) return atoi(v); fprintf(stderr, "%s: Using default '%d'\n", key, def); return def; 
} // option_list.c int option_find_int_quiet(list *l, char *key, int def) { char *v = option_find(l, key); if (v) return atoi(v); return def; } // option_list.c float option_find_float_quiet(list *l, char *key, float def) { char *v = option_find(l, key); if (v) return atof(v); return def; } // option_list.c float option_find_float(list *l, char *key, float def) { char *v = option_find(l, key); if (v) return atof(v); fprintf(stderr, "%s: Using default '%lf'\n", key, def); return def; } // -------------- parser.c -------------------- // parser.c typedef struct size_params { int quantized; int batch; int inputs; int h; int w; int c; int index; int time_steps; network net; } size_params; // parser.c typedef struct { char *type; list *options; }section; // parser.c list *read_cfg(char *filename) { FILE *file = fopen(filename, "r"); if (file == 0) file_error(filename); char *line; int nu = 0; list *sections = make_list(); section *current = 0; while ((line = fgetl(file)) != 0) { ++nu; strip(line); switch (line[0]) { case '[': current = malloc(sizeof(section)); list_insert(sections, current); current->options = make_list(); current->type = line; break; case '\0': case '#': case ';': free(line); break; default: if (!read_option(line, current->options)) { fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); free(line); } break; } } fclose(file); return sections; } // parser.c void load_convolutional_weights_cpu(layer l, FILE *fp) { int num = l.n*l.c*l.size*l.size; fread(l.biases, sizeof(float), l.n, fp); if (l.batch_normalize && (!l.dontloadscales)) { fread(l.scales, sizeof(float), l.n, fp); fread(l.rolling_mean, sizeof(float), l.n, fp); fread(l.rolling_variance, sizeof(float), l.n, fp); } fread(l.weights, sizeof(float), num, fp); /* if (l.adam) { fread(l.m, sizeof(float), num, fp); fread(l.v, sizeof(float), num, fp); } if (l.flipped) { transpose_matrix(l.weights, l.c*l.size*l.size, l.n); }*/ //if (l.binary) binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.weights); #ifdef GPU if (gpu_index >= 0) { push_convolutional_layer(l); } #endif #ifdef OPENCL //if (gpu_index >= 0) { ocl_push_convolutional_layer(l); //} #endif } // parser.c void load_weights_upto_cpu(network *net, char *filename, int cutoff) { #ifdef GPU if (net->gpu_index >= 0) { cuda_set_device(net->gpu_index); } #endif fprintf(stderr, "Loading weights from %s...", filename); fflush(stdout); FILE *fp = fopen(filename, "rb"); if (!fp) file_error(filename); int major; int minor; int revision; fread(&major, sizeof(int), 1, fp); fread(&minor, sizeof(int), 1, fp); fread(&revision, sizeof(int), 1, fp); if ((major * 10 + minor) >= 2) { fread(net->seen, sizeof(uint64_t), 1, fp); } else { int iseen = 0; fread(&iseen, sizeof(int), 1, fp); *net->seen = iseen; } //int transpose = (major > 1000) || (minor > 1000); int i; for (i = 0; i < net->n && i < cutoff; ++i) { layer l = net->layers[i]; if (l.dontload) continue; if (l.type == CONVOLUTIONAL) { load_convolutional_weights_cpu(l, fp); } } fprintf(stderr, "Done!\n"); fclose(fp); } void save_weights_int8(network net, char *filename, int cutoff) { fprintf(stderr, "Saving weights to %s\n", filename); FILE *fp = fopen(filename, "wb"); if(!fp) file_error(filename); int i; printf("%d \n",cutoff); for(i = 0; i < cutoff; i++){ layer l = net.layers[i]; if(l.type == CONVOLUTIONAL){ int num = l.n*l.c*l.size*l.size; fwrite(l.biases, sizeof(float), l.n, fp); if (i >= 1 && l.activation != LINEAR){ fwrite(l.weights, sizeof(uint8_t), num, fp); } else{ fwrite(l.weights, sizeof(float), num, fp); 
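            /* Record layout per CONVOLUTIONAL layer in this custom weights file:
             * biases (float), weights (uint8 for quantized layers, float in this
             * branch), then weights_int8 and the two quantization multipliers.
             * load_weights_int8() below reads the fields back in the same order. */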
} fwrite(l.weights_int8, sizeof(uint8_t), num, fp); fwrite(&l.weights_quant_multipler, sizeof(float), 1, fp); fwrite(&l.input_quant_multipler, sizeof(float), 1, fp); } } fclose(fp); } void load_weights_int8(network *net, char *filename, int cutoff) { fprintf(stderr, "Loading weights from %s... \n", filename); fflush(stdout); FILE *fp = fopen(filename, "rb"); if (!fp) file_error(filename); int i; for (i = 0; i < cutoff; i++) { layer l = net->layers[i]; if(l.type == CONVOLUTIONAL){ int num = l.n*l.c*l.size*l.size; fread(l.biases, sizeof(float), l.n, fp); if (i >= 1 && l.activation != LINEAR){ fread(l.weights, sizeof(uint8_t), num, fp); } else{ fread(l.weights, sizeof(float), num, fp); } fread(l.weights_int8, sizeof(uint8_t), num, fp); fread(&net->layers[i].weights_quant_multipler, sizeof(float), 1, fp); fread(&net->layers[i].input_quant_multipler, sizeof(float), 1, fp); } } fclose(fp); } // parser.c convolutional_layer parse_convolutional(list *options, size_params params) { int n = option_find_int(options, "filters", 1); int size = option_find_int(options, "size", 1); int stride = option_find_int(options, "stride", 1); int pad = option_find_int_quiet(options, "pad", 0); int padding = option_find_int_quiet(options, "padding", 0); if (pad) padding = size / 2; char *activation_s = option_find_str(options, "activation", "logistic"); ACTIVATION activation = get_activation(activation_s); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before convolutional layer must output image."); int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0); int binary = option_find_int_quiet(options, "binary", 0); int xnor = option_find_int_quiet(options, "xnor", 0); int use_bin_output = option_find_int_quiet(options, "bin_output", 0); int quantized = params.quantized; if (params.index == 0 || activation == LINEAR || (params.index > 1 && stride>1) || size==1) quantized = 0; // disable Quantized for 1st and last layers convolutional_layer layer = make_convolutional_layer(batch, h, w, c, n, size, stride, padding, activation, batch_normalize, binary, xnor, params.net.adam, quantized, use_bin_output); layer.flipped = option_find_int_quiet(options, "flipped", 0); layer.dot = option_find_float_quiet(options, "dot", 0); if (params.net.adam) { layer.B1 = params.net.B1; layer.B2 = params.net.B2; layer.eps = params.net.eps; } return layer; } // parser.c layer parse_region(list *options, size_params params) { int coords = option_find_int(options, "coords", 4); int classes = option_find_int(options, "classes", 20); int num = option_find_int(options, "num", 1); layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords); assert(l.outputs == params.inputs); l.log = option_find_int_quiet(options, "log", 0); l.sqrt = option_find_int_quiet(options, "sqrt", 0); l.softmax = option_find_int(options, "softmax", 0); l.max_boxes = option_find_int_quiet(options, "max", 30); l.jitter = option_find_float(options, "jitter", .2); l.rescore = option_find_int_quiet(options, "rescore", 0); l.thresh = option_find_float(options, "thresh", .5); l.classfix = option_find_int_quiet(options, "classfix", 0); l.absolute = option_find_int_quiet(options, "absolute", 0); l.random = option_find_int_quiet(options, "random", 0); l.coord_scale = option_find_float(options, "coord_scale", 1); l.object_scale = option_find_float(options, "object_scale", 1); l.noobject_scale = option_find_float(options, "noobject_scale", 1); l.class_scale = 
option_find_float(options, "class_scale", 1); l.bias_match = option_find_int_quiet(options, "bias_match", 0); char *tree_file = option_find_str(options, "tree", 0); if (tree_file) l.softmax_tree = read_tree(tree_file); char *map_file = option_find_str(options, "map", 0); if (map_file) l.map = read_map(map_file); char *a = option_find_str(options, "anchors", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } for (i = 0; i < n; ++i) { float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',') + 1; } } return l; } // parser.c int *parse_yolo_mask(char *a, int *num) { int *mask = 0; if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } mask = calloc(n, sizeof(int)); for (i = 0; i < n; ++i) { int val = atoi(a); mask[i] = val; a = strchr(a, ',') + 1; } *num = n; } return mask; } // parser.c layer parse_yolo(list *options, size_params params) { int classes = option_find_int(options, "classes", 20); int total = option_find_int(options, "num", 1); int num = total; char *a = option_find_str(options, "mask", 0); int *mask = parse_yolo_mask(a, &num); int max_boxes = option_find_int_quiet(options, "max", 90); layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes, max_boxes); if (l.outputs != params.inputs) { printf("Error: l.outputs == params.inputs \n"); printf("filters= in the [convolutional]-layer doesn't correspond to classes= or mask= in [yolo]-layer \n"); exit(EXIT_FAILURE); } //assert(l.outputs == params.inputs); char *map_file = option_find_str(options, "map", 0); if (map_file) l.map = read_map(map_file); l.jitter = option_find_float(options, "jitter", .2); l.focal_loss = option_find_int_quiet(options, "focal_loss", 0); l.ignore_thresh = option_find_float(options, "ignore_thresh", .5); l.truth_thresh = option_find_float(options, "truth_thresh", 1); l.random = option_find_int_quiet(options, "random", 0); a = option_find_str(options, "anchors", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } for (i = 0; i < n && i < total * 2; ++i) { float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',') + 1; } } return l; } // parser.c softmax_layer parse_softmax(list *options, size_params params) { int groups = option_find_int_quiet(options, "groups", 1); softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups); layer.temperature = option_find_float_quiet(options, "temperature", 1); char *tree_file = option_find_str(options, "tree", 0); if (tree_file) layer.softmax_tree = read_tree(tree_file); return layer; } // parser.c maxpool_layer parse_maxpool(list *options, size_params params) { int stride = option_find_int(options, "stride", 1); int size = option_find_int(options, "size", stride); int padding = option_find_int_quiet(options, "padding", size - 1); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before maxpool layer must output image."); maxpool_layer layer = make_maxpool_layer(batch, h, w, c, size, stride, padding); return layer; } // parser.c layer parse_reorg(list *options, size_params params) { int stride = option_find_int(options, "stride", 1); int reverse = option_find_int_quiet(options, "reverse", 0); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before reorg layer must output image."); layer layer = make_reorg_layer(batch, w, h, c, stride, reverse); 
    return layer;
}

// parser.c
layer parse_upsample(list *options, size_params params, network net)
{
    int stride = option_find_int(options, "stride", 2);
    layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
    l.scale = option_find_float_quiet(options, "scale", 1);
    return l;
}

// parser.c
layer parse_shortcut(list *options, size_params params, network net)
{
    char *l = option_find(options, "from");
    int index = atoi(l);
    if (index < 0) index = params.index + index;

    int batch = params.batch;
    layer from = net.layers[index];

    layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);

    char *activation_s = option_find_str(options, "activation", "linear");
    ACTIVATION activation = get_activation(activation_s);
    s.activation = activation;
    return s;
}

// parser.c
route_layer parse_route(list *options, size_params params, network net)
{
    char *l = option_find(options, "layers");
    if (!l) error("Route Layer must specify input layers");
    int len = strlen(l);    // NULL check must come before strlen(l)
    int n = 1;
    int i;
    for (i = 0; i < len; ++i) {
        if (l[i] == ',') ++n;
    }

    int *layers = calloc(n, sizeof(int));
    int *sizes = calloc(n, sizeof(int));
    for (i = 0; i < n; ++i) {
        int index = atoi(l);
        l = strchr(l, ',') + 1;
        if (index < 0) index = params.index + index;
        layers[i] = index;
        sizes[i] = net.layers[index].outputs;
    }
    int batch = params.batch;

    route_layer layer = make_route_layer(batch, n, layers, sizes);

    convolutional_layer first = net.layers[layers[0]];
    layer.out_w = first.out_w;
    layer.out_h = first.out_h;
    layer.out_c = first.out_c;
    for (i = 1; i < n; ++i) {
        int index = layers[i];
        convolutional_layer next = net.layers[index];
        if (next.out_w == first.out_w && next.out_h == first.out_h) {
            layer.out_c += next.out_c;
        }
        else {
            layer.out_h = layer.out_w = layer.out_c = 0;
        }
    }
    return layer;
}

// parser.c
void free_section(section *s)
{
    free(s->type);
    node *n = s->options->front;
    while (n) {
        kvp *pair = (kvp *)n->val;
        free(pair->key);
        free(pair);
        node *next = n->next;
        free(n);
        n = next;
    }
    free(s->options);
    free(s);
}

// parser.c
LAYER_TYPE string_to_layer_type(char * type)
{
    if (strcmp(type, "[yolo]") == 0) return YOLO;
    if (strcmp(type, "[region]") == 0) return REGION;
    if (strcmp(type, "[conv]") == 0 || strcmp(type, "[convolutional]") == 0) return CONVOLUTIONAL;
    if (strcmp(type, "[net]") == 0 || strcmp(type, "[network]") == 0) return NETWORK;
    if (strcmp(type, "[max]") == 0 || strcmp(type, "[maxpool]") == 0) return MAXPOOL;
    if (strcmp(type, "[reorg]") == 0) return REORG;
    if (strcmp(type, "[upsample]") == 0) return UPSAMPLE;
    if (strcmp(type, "[shortcut]") == 0) return SHORTCUT;
    if (strcmp(type, "[soft]") == 0 || strcmp(type, "[softmax]") == 0) return SOFTMAX;
    if (strcmp(type, "[route]") == 0) return ROUTE;
    return BLANK;
}

// parser.c
learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random") == 0) return RANDOM;
    if (strcmp(s, "poly") == 0) return POLY;
    if (strcmp(s, "constant") == 0) return CONSTANT;
    if (strcmp(s, "step") == 0) return STEP;
    if (strcmp(s, "exp") == 0) return EXP;
    if (strcmp(s, "sigmoid") == 0) return SIG;
    if (strcmp(s, "steps") == 0) return STEPS;
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}

// parser.c
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch", 1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
option_find_int(options, "subdivisions", 1); net->time_steps = option_find_int_quiet(options, "time_steps", 1); net->batch /= subdivs; net->batch *= net->time_steps; net->subdivisions = subdivs; char *a = option_find_str(options, "input_calibration", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } net->input_calibration_size = n; net->input_calibration = (float *)calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { float coef = atof(a); net->input_calibration[i] = coef; a = strchr(a, ',') + 1; } } net->adam = option_find_int_quiet(options, "adam", 0); if (net->adam) { net->B1 = option_find_float(options, "B1", .9); net->B2 = option_find_float(options, "B2", .999); net->eps = option_find_float(options, "eps", .000001); } net->h = option_find_int_quiet(options, "height", 0); net->w = option_find_int_quiet(options, "width", 0); net->c = option_find_int_quiet(options, "channels", 0); net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c); net->max_crop = option_find_int_quiet(options, "max_crop", net->w * 2); net->min_crop = option_find_int_quiet(options, "min_crop", net->w); net->angle = option_find_float_quiet(options, "angle", 0); net->aspect = option_find_float_quiet(options, "aspect", 1); net->saturation = option_find_float_quiet(options, "saturation", 1); net->exposure = option_find_float_quiet(options, "exposure", 1); net->hue = option_find_float_quiet(options, "hue", 0); if (!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied"); char *policy_s = option_find_str(options, "policy", "constant"); net->policy = get_policy(policy_s); net->burn_in = option_find_int_quiet(options, "burn_in", 0); if (net->policy == STEP) { net->step = option_find_int(options, "step", 1); net->scale = option_find_float(options, "scale", 1); } else if (net->policy == STEPS) { char *l = option_find(options, "steps"); char *p = option_find(options, "scales"); if (!l || !p) error("STEPS policy must have steps and scales in cfg file"); int len = strlen(l); int n = 1; int i; for (i = 0; i < len; ++i) { if (l[i] == ',') ++n; } int *steps = calloc(n, sizeof(int)); float *scales = calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { int step = atoi(l); float scale = atof(p); l = strchr(l, ',') + 1; p = strchr(p, ',') + 1; steps[i] = step; scales[i] = scale; } net->scales = scales; net->steps = steps; net->num_steps = n; } else if (net->policy == EXP) { net->gamma = option_find_float(options, "gamma", 1); } else if (net->policy == SIG) { net->gamma = option_find_float(options, "gamma", 1); net->step = option_find_int(options, "step", 1); } else if (net->policy == POLY || net->policy == RANDOM) { net->power = option_find_float(options, "power", 1); } net->max_batches = option_find_int(options, "max_batches", 0); } // parser.c network parse_network_cfg(char *filename, int batch, int quantized) { list *sections = read_cfg(filename); node *n = sections->front; if (!n) error("Config file has no sections"); network net = make_network(sections->size - 1); net.quantized = quantized; net.do_input_calibration = 0; net.gpu_index = gpu_index; size_params params; params.quantized = quantized; section *s = (section *)n->val; list *options = s->options; if (strcmp(s->type, "[net]") == 0 && strcmp(s->type, "[network]") == 0) error("First section must be [net] or [network]"); parse_net_options(options, &net); params.h = net.h; params.w = net.w; params.c = net.c; params.inputs = net.inputs; if (batch > 0) net.batch = batch; 
params.batch = net.batch; params.time_steps = net.time_steps; params.net = net; size_t workspace_size = 0; n = n->next; int count = 0; free_section(s); fprintf(stderr, "layer filters size input output\n"); while (n) { params.index = count; fprintf(stderr, "%5d ", count); s = (section *)n->val; options = s->options; layer l = { 0 }; LAYER_TYPE lt = string_to_layer_type(s->type); if (lt == CONVOLUTIONAL) { // if(count == 80) params.quantized = 0; // doesn't lost GPU - mAP = 45.61% node *tmp = n->next; if(tmp) tmp = tmp->next; if (tmp) { if (string_to_layer_type(((section *)tmp->val)->type) == YOLO) { params.quantized = 0; // mAP = 53.60% //printf("\n\n i = %d \n\n", count); } } l = parse_convolutional(options, params); } else if (lt == REGION) { l = parse_region(options, params); } else if (lt == YOLO) { l = parse_yolo(options, params); } else if (lt == SOFTMAX) { l = parse_softmax(options, params); net.hierarchy = l.softmax_tree; } else if (lt == MAXPOOL) { l = parse_maxpool(options, params); } else if (lt == REORG) { l = parse_reorg(options, params); } else if (lt == ROUTE) { l = parse_route(options, params, net); } else if (lt == UPSAMPLE) { l = parse_upsample(options, params, net); } else if (lt == SHORTCUT) { l = parse_shortcut(options, params, net); } else { fprintf(stderr, "Type not recognized: %s\n", s->type); } l.dontload = option_find_int_quiet(options, "dontload", 0); l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0); option_unused(options); net.layers[count] = l; if (l.workspace_size > workspace_size) workspace_size = l.workspace_size; free_section(s); n = n->next; ++count; if (n) { params.h = l.out_h; params.w = l.out_w; params.c = l.out_c; params.inputs = l.outputs; } } free_list(sections); net.outputs = get_network_output_size(net); net.output = get_network_output(net); if (workspace_size) { //printf("%ld\n", workspace_size); #ifdef GPU if (gpu_index >= 0) { net.workspace = cuda_make_array(0, (workspace_size - 1) / sizeof(float) + 1); int size = net.layers[0].inputs * net.batch; //get_network_input_size(net) * net.batch; net.input_state_gpu = cuda_make_array(0, size); if (cudaSuccess == cudaHostAlloc(&net.input_pinned_cpu, size * sizeof(float), cudaHostRegisterMapped)) net.input_pinned_cpu_flag = 1; else { cudaGetLastError(); // reset CUDA-error net.input_pinned_cpu = calloc(size, sizeof(float)); } } else { net.workspace = calloc(1, workspace_size); } #else // GPU net.workspace = calloc(1, workspace_size); #endif // GPU #ifdef OPENCL //if (gpu_index >= 0) { net.workspace_ocl = ocl_make_array(0, workspace_size / sizeof(float)); //net.workspace_ocl = ocl_make_array(0, (workspace_size - 1) / sizeof(float) + 1); //net.workspace_ocl = ocl_make_array(NULL, 1024*1024*1024); //} #endif // OPENCL } return net; } // -------------- gettimeofday for Windows-------------------- #if defined(_MSC_VER) int gettimeofday(struct timeval *tv, struct timezone *tz) { FILETIME ft; unsigned __int64 tmpres = 0; static int tzflag; if (NULL != tv) { GetSystemTimeAsFileTime(&ft); tmpres |= ft.dwHighDateTime; tmpres <<= 32; tmpres |= ft.dwLowDateTime; /*converting file time to unix epoch*/ tmpres -= DELTA_EPOCH_IN_MICROSECS; tmpres /= 10; /*convert into microseconds*/ tv->tv_sec = (long)(tmpres / 1000000UL); tv->tv_usec = (long)(tmpres % 1000000UL); } if (NULL != tz) { if (!tzflag) { _tzset(); tzflag++; } tz->tz_minuteswest = _timezone / 60; tz->tz_dsttime = _daylight; } return 0; } #endif // _MSC_VER // ------------------------------------------------------ // Calculate mAP and 
// TP/FP/FN, IoU, F1

#include "pthread.h"

//#include "box.h"
/*
// from: box.h
typedef struct {
    float x, y, w, h;
} box;
*/
float box_iou(box a, box b);

typedef enum {
    CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, LETTERBOX_DATA,
    COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA
} data_type;

typedef struct matrix {
    int rows, cols;
    float **vals;
} matrix;

typedef struct {
    int w, h;
    matrix X;
    matrix y;
    int shallow;
    int *num_boxes;
    box **boxes;
} data;

typedef struct {
    int id;
    float x, y, w, h;
    float left, right, top, bottom;
} box_label;

typedef struct load_args {
    int threads;
    char **paths;
    char *path;
    int n;
    int m;
    char **labels;
    int h;
    int w;
    int out_w;
    int out_h;
    int nh;
    int nw;
    int num_boxes;
    int min, max, size;
    int classes;
    int background;
    int scale;
    int small_object;
    float jitter;
    int flip;
    float angle;
    float aspect;
    float saturation;
    float exposure;
    float hue;
    data *d;
    image *im;
    image *resized;
    data_type type;
    tree *hierarchy;
} load_args;

// The YOLO output tensor is laid out per batch as [n anchors][4 box coords +
// 1 objectness + classes][l.w*l.h cells]; entry_index() returns the flat
// offset of channel "entry" for the anchor and cell encoded in "location".
int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(4 + l.classes + 1) + entry*l.w*l.h + loc;
}

int yolo_num_detections(layer l, float thresh)
{
    int i, n;
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i) {
        for (n = 0; n < l.n; ++n) {
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            if (l.output[obj_index] > thresh) {
                ++count;
            }
        }
    }
    return count;
}

int num_detections(network *net, float thresh)
{
    int i;
    int s = 0;
    for (i = 0; i < net->n; ++i) {
        layer l = net->layers[i];
        if (l.type == YOLO) {
            s += yolo_num_detections(l, thresh);
        }
        if (l.type == DETECTION || l.type == REGION) {
            s += l.w*l.h*l.n;
        }
    }
    return s;
}

detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer l = net->layers[net->n - 1];
    int i;
    int nboxes = num_detections(net, thresh);
    if (num) *num = nboxes;
    detection *dets = calloc(nboxes, sizeof(detection));
    for (i = 0; i < nboxes; ++i) {
        dets[i].prob = calloc(l.classes, sizeof(float));
        if (l.coords > 4) {
            dets[i].mask = calloc(l.coords - 4, sizeof(float));
        }
    }
    return dets;
}

void free_detections(detection *dets, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        free(dets[i].prob);
        if (dets[i].mask) free(dets[i].mask);
    }
    free(dets);
}

void find_replace(char *str, char *orig, char *rep, char *output)
{
    char buffer[4096] = { 0 };
    char *p;

    sprintf(buffer, "%s", str);
    if (!(p = strstr(buffer, orig))) {    // Is 'orig' even in 'str'?
        sprintf(output, "%s", str);
        return;
    }

    *p = '\0';
    sprintf(output, "%s%s%s", buffer, rep, p + strlen(orig));
}

void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative, int letter)
{
    int i;
    int new_w = 0;
    int new_h = 0;
    if (letter) {
        if (((float)netw / w) < ((float)neth / h)) {
            new_w = netw;
            new_h = (h * netw) / w;
        }
        else {
            new_h = neth;
            new_w = (w * neth) / h;
        }
    }
    else {
        new_w = netw;
        new_h = neth;
    }
    for (i = 0; i < n; ++i) {
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2.
/ neth) / ((float)new_h / neth); b.w *= (float)netw / new_w; b.h *= (float)neth / new_h; if (!relative) { b.x *= w; b.w *= w; b.y *= h; b.h *= h; } dets[i].bbox = b; } } // yolo_layer.c box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride) { box b; b.x = (i + x[index + 0 * stride]) / lw; b.y = (j + x[index + 1 * stride]) / lh; b.w = exp(x[index + 2 * stride]) * biases[2 * n] / w; b.h = exp(x[index + 3 * stride]) * biases[2 * n + 1] / h; return b; } // yolo_layer.c int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets, int letter) { int i, j, n; float *predictions = l.output; //if (l.batch == 2) avg_flipped_yolo(l); int count = 0; for (i = 0; i < l.w*l.h; ++i) { int row = i / l.w; int col = i % l.w; for (n = 0; n < l.n; ++n) { int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4); float objectness = predictions[obj_index]; //if (objectness <= thresh) continue; // incorrect behavior for Nan values if (objectness > thresh) { int box_index = entry_index(l, 0, n*l.w*l.h + i, 0); dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h); dets[count].objectness = objectness; dets[count].classes = l.classes; for (j = 0; j < l.classes; ++j) { int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j); float prob = objectness*predictions[class_index]; dets[count].prob[j] = (prob > thresh) ? prob : 0; } ++count; } } } correct_yolo_boxes(dets, count, w, h, netw, neth, relative, letter); return count; } // get prediction boxes: yolov2_forward_network.c void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map); void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter) { box *boxes = calloc(l.w*l.h*l.n, sizeof(box)); float **probs = calloc(l.w*l.h*l.n, sizeof(float *)); int i, j; for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float *)); get_region_boxes_cpu(l, 1, 1, thresh, probs, boxes, 0, map); for (j = 0; j < l.w*l.h*l.n; ++j) { dets[j].classes = l.classes; dets[j].bbox = boxes[j]; dets[j].objectness = 1; for (i = 0; i < l.classes; ++i) { dets[j].prob[i] = probs[j][i]; } } free(boxes); free_ptrs((void **)probs, l.w*l.h*l.n); //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative); correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter); } void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter) { int j; for (j = 0; j < net->n; ++j) { layer l = net->layers[j]; if (l.type == YOLO) { int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter); dets += count; } if (l.type == REGION) { custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter); //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets); dets += l.w*l.h*l.n; } } } detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter) { detection *dets = make_network_boxes(net, thresh, num); fill_network_boxes(net, w, h, thresh, hier, map, relative, dets, letter); return dets; } void *load_thread(void *ptr) { load_args a = *(struct load_args*)ptr; if (a.type == IMAGE_DATA) { *(a.im) = load_image(a.path, 0, 0, 3); 
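        /* IMAGE_DATA stretches the image to the network input size without
         * preserving aspect ratio; the LETTERBOX_DATA branch below, which
         * would preserve it, is not implemented in this build. */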
*(a.resized) = resize_image(*(a.im), a.w, a.h); //printf(" a.path = %s, a.w = %d, a.h = %d \n", a.path, a.w, a.h); } else if (a.type == LETTERBOX_DATA) { printf(" LETTERBOX_DATA isn't implemented \n"); getchar(); //*(a.im) = load_image(a.path, 0, 0, 0); //*(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else { printf("unknown DATA type = %d \n", a.type); getchar(); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if (pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } box_label *read_boxes(char *filename, int *n) { box_label *boxes = calloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { //file_error(filename); *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while (fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5) { boxes = realloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w / 2; boxes[count].right = x + w / 2; boxes[count].top = y - h / 2; boxes[count].bottom = y + h / 2; ++count; } fclose(file); *n = count; return boxes; } typedef struct { box b; float p; int class_id; int image_index; int truth_flag; int unique_truth_index; } box_prob; int detections_comparator(const void *pa, const void *pb) { box_prob a = *(box_prob *)pa; box_prob b = *(box_prob *)pb; float diff = a.p - b.p; if (diff < 0) return 1; else if (diff > 0) return -1; return 0; } int nms_comparator_v3(const void *pa, const void *pb) { detection a = *(detection *)pa; detection b = *(detection *)pb; float diff = 0; if (b.sort_class >= 0) { diff = a.prob[b.sort_class] - b.prob[b.sort_class]; } else { diff = a.objectness - b.objectness; } if (diff < 0) return 1; else if (diff > 0) return -1; return 0; } void do_nms_sort_v3(detection *dets, int total, int classes, float thresh) { int i, j, k; k = total - 1; for (i = 0; i <= k; ++i) { if (dets[i].objectness == 0) { detection swap = dets[i]; dets[i] = dets[k]; dets[k] = swap; --k; --i; } } total = k + 1; for (k = 0; k < classes; ++k) { for (i = 0; i < total; ++i) { dets[i].sort_class = k; } qsort(dets, total, sizeof(detection), nms_comparator_v3); for (i = 0; i < total; ++i) { //printf(" k = %d, \t i = %d \n", k, i); if (dets[i].prob[k] == 0) continue; box a = dets[i].bbox; for (j = i + 1; j < total; ++j) { box b = dets[j].bbox; if (box_iou(a, b) > thresh) { dets[j].prob[k] = 0; } } } } } void validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, float thresh_calc_avg_iou, int quantized, const float iou_thresh) { list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.txt"); char *difficult_valid_images = option_find_str(options, "difficult", NULL); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network net = parse_network_cfg(cfgfile, 1, quantized); //parse_network_cfg_custom(cfgfile, 1); // set batch=1 if (weightfile) { load_weights_upto_cpu(&net, weightfile, net.n); } //set_batch_network(&net, 1); yolov2_fuse_conv_batchnorm(net); calculate_binary_weights(net); if (quantized) quantinization_and_get_multipliers(net); srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); char 
 **paths_dif = NULL;
    if (difficult_valid_images) {
        list *plist_dif = get_paths(difficult_valid_images);
        paths_dif = (char **)list_to_array(plist_dif);
    }

    layer l = net.layers[net.n - 1];
    int classes = l.classes;

    int m = plist->size;
    int i = 0;
    int t;

    const float thresh = .005;
    float nms = .45;
    //const float iou_thresh = 0.5;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = { 0 };
    args.w = net.w;
    args.h = net.h;
    args.type = IMAGE_DATA;
    //args.type = LETTERBOX_DATA;

    //const float thresh_calc_avg_iou = 0.24;
    float avg_iou = 0;
    int tp_for_thresh = 0;
    int fp_for_thresh = 0;

    box_prob *detections = calloc(1, sizeof(box_prob));
    int detections_count = 0;
    int unique_truth_count = 0;

    int *truth_classes_count = calloc(classes, sizeof(int));

    for (t = 0; t < nthreads; ++t) {
        args.path = paths[i + t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    time_t start = time(0);
    for (i = nthreads; i < m + nthreads; i += nthreads) {
        fprintf(stderr, "%d\n", i);
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for (t = 0; t < nthreads && i + t < m; ++t) {
            args.path = paths[i + t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
#ifndef WIN32
            //if (!quantized) usleep(50000);
            usleep(50000);    // 0.05 sec
#endif
            const int image_index = i + t - nthreads;
            char *path = paths[image_index];
            //char *id = basecfg(path);
            float *X = val_resized[t].data;
            //network_predict(net, X);
#ifdef GPU
            if (quantized) {
                network_predict_gpu_cudnn_quantized(net, X);    // quantized
                //nms = 0.2;
            }
            else {
                network_predict_gpu_cudnn(net, X);
            }
#else // GPU
#ifdef OPENCL
            network_predict_opencl(net, X);
#else // OPENCL
            if (quantized) {
                network_predict_quantized(net, X);    // quantized
                //nms = 0.2;
            }
            else {
                network_predict_cpu(net, X);
            }
#endif // OPENCL
#endif // GPU

            int nboxes = 0;
            int letterbox = (args.type == LETTERBOX_DATA);
            float hier_thresh = 0;
            detection *dets = get_network_boxes(&net, 1, 1, thresh, hier_thresh, 0, 0, &nboxes, letterbox);
            //detection *dets = get_network_boxes(&net, val[t].w, val[t].h, thresh, hier_thresh, 0, 1, &nboxes, letterbox);    // for letterbox=1
            if (nms) do_nms_sort_v3(dets, nboxes, l.classes, nms);

            char labelpath[4096];
            find_replace(path, "images", "labels", labelpath);
            find_replace(labelpath, "JPEGImages", "labels", labelpath);
            find_replace(labelpath, ".jpg", ".txt", labelpath);
            find_replace(labelpath, ".png", ".txt", labelpath);
            find_replace(labelpath, ".bmp", ".txt", labelpath);
            find_replace(labelpath, ".JPG", ".txt", labelpath);
            find_replace(labelpath, ".JPEG", ".txt", labelpath);

            int num_labels = 0;
            box_label *truth = read_boxes(labelpath, &num_labels);
            //printf(" labelpath = %s \n", labelpath);
            int i, j;
            for (j = 0; j < num_labels; ++j) {
                truth_classes_count[truth[j].id]++;
            }

            // difficult
            box_label *truth_dif = NULL;
            int num_labels_dif = 0;
            if (paths_dif) {
                char *path_dif = paths_dif[image_index];
                char labelpath_dif[4096];
                find_replace(path_dif, "images", "labels", labelpath_dif);
                find_replace(labelpath_dif, "JPEGImages", "labels", labelpath_dif);
                find_replace(labelpath_dif, ".jpg", ".txt", labelpath_dif);
                find_replace(labelpath_dif, ".JPEG", ".txt",
labelpath_dif);
                find_replace(labelpath_dif, ".png", ".txt", labelpath_dif);
                truth_dif = read_boxes(labelpath_dif, &num_labels_dif);
            }

            const int checkpoint_detections_count = detections_count;

            for (i = 0; i < nboxes; ++i) {
                int class_id;
                for (class_id = 0; class_id < classes; ++class_id) {
                    float prob = dets[i].prob[class_id];
                    if (prob > 0) {
                        detections_count++;
                        detections = realloc(detections, detections_count * sizeof(box_prob));
                        detections[detections_count - 1].b = dets[i].bbox;
                        detections[detections_count - 1].p = prob;
                        detections[detections_count - 1].image_index = image_index;
                        detections[detections_count - 1].class_id = class_id;
                        detections[detections_count - 1].truth_flag = 0;
                        detections[detections_count - 1].unique_truth_index = -1;

                        int truth_index = -1;
                        float max_iou = 0;
                        for (j = 0; j < num_labels; ++j) {
                            box t = { truth[j].x, truth[j].y, truth[j].w, truth[j].h };
                            //printf(" IoU = %f, prob = %f, class_id = %d, truth[j].id = %d \n",
                            //    box_iou(dets[i].bbox, t), prob, class_id, truth[j].id);
                            float current_iou = box_iou(dets[i].bbox, t);
                            if (current_iou > iou_thresh && class_id == truth[j].id) {
                                if (current_iou > max_iou) {
                                    max_iou = current_iou;
                                    truth_index = unique_truth_count + j;
                                }
                            }
                        }

                        // best IoU
                        if (truth_index > -1) {
                            detections[detections_count - 1].truth_flag = 1;
                            detections[detections_count - 1].unique_truth_index = truth_index;
                        }
                        else {
                            // if object is difficult then remove detection
                            for (j = 0; j < num_labels_dif; ++j) {
                                box t = { truth_dif[j].x, truth_dif[j].y, truth_dif[j].w, truth_dif[j].h };
                                float current_iou = box_iou(dets[i].bbox, t);
                                if (current_iou > iou_thresh && class_id == truth_dif[j].id) {
                                    --detections_count;
                                    break;
                                }
                            }
                        }

                        // calc avg IoU, true-positives, false-positives for required Threshold
                        if (prob > thresh_calc_avg_iou) {
                            int z, found = 0;
                            for (z = checkpoint_detections_count; z < detections_count - 1; ++z)
                                if (detections[z].unique_truth_index == truth_index) {
                                    found = 1;
                                    break;
                                }

                            if (truth_index > -1 && found == 0) {
                                avg_iou += max_iou;
                                ++tp_for_thresh;
                            }
                            else
                                fp_for_thresh++;
                        }
                    }
                }
            }

            unique_truth_count += num_labels;

            free_detections(dets, nboxes);
            //free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }

    if ((tp_for_thresh + fp_for_thresh) > 0) {
        avg_iou = avg_iou / (tp_for_thresh + fp_for_thresh);
    }

    // SORT(detections)
    qsort(detections, detections_count, sizeof(box_prob), detections_comparator);

    typedef struct {
        double precision;
        double recall;
        int tp, fp, fn;
    } pr_t;

    // for PR-curve
    pr_t **pr = calloc(classes, sizeof(pr_t*));
    for (i = 0; i < classes; ++i) {
        pr[i] = calloc(detections_count, sizeof(pr_t));
    }
    printf("detections_count = %d, unique_truth_count = %d \n", detections_count, unique_truth_count);

    int *truth_flags = calloc(unique_truth_count, sizeof(int));
    int rank;
    for (rank = 0; rank < detections_count; ++rank) {
        if (rank % 100 == 0)
            printf(" rank = %d of ranks = %d \r", rank, detections_count);

        if (rank > 0) {
            int class_id;
            for (class_id = 0; class_id < classes; ++class_id) {
                pr[class_id][rank].tp = pr[class_id][rank - 1].tp;
                pr[class_id][rank].fp = pr[class_id][rank - 1].fp;
            }
        }

        box_prob d = detections[rank];
        // if (detected && isn't detected before)
        if (d.truth_flag == 1) {
            if (truth_flags[d.unique_truth_index] == 0) {
                truth_flags[d.unique_truth_index] = 1;
                pr[d.class_id][rank].tp++;    // true-positive
            }
        }
        else {
            pr[d.class_id][rank].fp++;    // false-positive
        }

        for (i = 0; i < classes; ++i) {
            const int tp = pr[i][rank].tp;
            const int fp = pr[i][rank].fp;
            const int fn = truth_classes_count[i] - tp;    // false-negative = objects - true-positive
            pr[i][rank].fn = fn;

            if ((tp + fp) > 0) pr[i][rank].precision = (double)tp / (double)(tp + fp);
            else pr[i][rank].precision = 0;

            if ((tp + fn) > 0) pr[i][rank].recall = (double)tp / (double)(tp + fn);
            else pr[i][rank].recall = 0;
        }
    }
    free(truth_flags);

    double mean_average_precision = 0;

    for (i = 0; i < classes; ++i) {
        double avg_precision = 0;
        int point;
        for (point = 0; point < 11; ++point) {
            double cur_recall = point * 0.1;
            double cur_precision = 0;
            for (rank = 0; rank < detections_count; ++rank) {
                if (pr[i][rank].recall >= cur_recall) {    // > or >=
                    if (pr[i][rank].precision > cur_precision) {
                        cur_precision = pr[i][rank].precision;
                    }
                }
            }
            //printf("class_id = %d, point = %d, cur_recall = %.4f, cur_precision = %.4f \n", i, point, cur_recall, cur_precision);
            avg_precision += cur_precision;
        }
        avg_precision = avg_precision / 11;
        printf("class_id = %d, name = %s, \t ap = %2.2f %% \n", i, names[i], avg_precision * 100);
        mean_average_precision += avg_precision;
    }

    const float cur_precision = (float)tp_for_thresh / ((float)tp_for_thresh + (float)fp_for_thresh);
    const float cur_recall = (float)tp_for_thresh / ((float)tp_for_thresh + (float)(unique_truth_count - tp_for_thresh));
    const float f1_score = 2.F * cur_precision * cur_recall / (cur_precision + cur_recall);
    printf(" for thresh = %1.2f, precision = %1.2f, recall = %1.2f, F1-score = %1.2f \n",
        thresh_calc_avg_iou, cur_precision, cur_recall, f1_score);

    printf(" for thresh = %0.2f, TP = %d, FP = %d, FN = %d, average IoU = %2.2f %% \n",
        thresh_calc_avg_iou, tp_for_thresh, fp_for_thresh, unique_truth_count - tp_for_thresh, avg_iou * 100);

    mean_average_precision = mean_average_precision / classes;
    if (iou_thresh == 0.5) {
        printf("\n mean average precision (mAP) = %f, or %2.2f %% \n", mean_average_precision, mean_average_precision * 100);
    }
    else {
        printf("\n average precision (AP) = %f, or %2.2f %% for IoU threshold = %f \n", mean_average_precision, mean_average_precision * 100, iou_thresh);
    }

    for (i = 0; i < classes; ++i) {
        free(pr[i]);
    }
    free(pr);
    free(detections);
    free(truth_classes_count);

    fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)(time(0) - start));
    //getchar();
}

void validate_calibrate_valid(char *datacfg, char *cfgfile, char *weightfile, int input_calibration)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.txt");
    printf("valid=%s \n", valid_images);

    network net = parse_network_cfg(cfgfile, 1, 0);    // batch=1, quantized=0
    if (!input_calibration) {
        printf("\n -input_calibration <number> - isn't specified in command line, will be used 1000 images \n\n");
        input_calibration = 1000;
    }
    net.do_input_calibration = input_calibration;
    if (weightfile) {
        load_weights_upto_cpu(&net, weightfile, net.n);
    }
    //set_batch_network(&net, 1);
    yolov2_fuse_conv_batchnorm(net);
    srand(time(0));

#ifdef GPU
    size_t workspace_size = 0;
    for (j = 0; j < net.n; ++j) {
        layer l = net.layers[j];
        size_t cur_workspace_size = (size_t)l.out_h*l.out_w*l.size*l.size*l.c * sizeof(float);
        if (cur_workspace_size > workspace_size) workspace_size = cur_workspace_size;
    }
    cudaFree(net.workspace);
    net.workspace = calloc(1, workspace_size);
#endif // GPU

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net.layers[net.n - 1];
    int classes = l.classes;

    int m = plist->size;
    int i = 0;
    int t;

    const float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf =
calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = { 0 };
    args.w = net.w;
    args.h = net.h;
    args.type = IMAGE_DATA;
    //args.type = LETTERBOX_DATA;

    //const float thresh_calc_avg_iou = 0.24;
    float avg_iou = 0;
    int tp_for_thresh = 0;
    int fp_for_thresh = 0;

    box_prob *detections = calloc(1, sizeof(box_prob));
    int detections_count = 0;
    int unique_truth_count = 0;

    int *truth_classes_count = calloc(classes, sizeof(int));

    for (t = 0; t < nthreads; ++t) {
        args.path = paths[i + t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    time_t start = time(0);
    for (i = nthreads; i < m + nthreads; i += nthreads) {
        fprintf(stderr, "%d\n", i);
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for (t = 0; t < nthreads && i + t < m; ++t) {
            args.path = paths[i + t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            const int image_index = i + t - nthreads;
            char *path = paths[image_index];
            //char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_calibrate_cpu(net, X);
        }
    }
}
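// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the file above: validate_detector_map()
// relies on box_iou(), which is defined elsewhere in the repository. The
// conventional IoU for darknet's center-format {x, y, w, h} boxes looks like
// the following; the *_sketch names are placeholders, not the repository API.
// ---------------------------------------------------------------------------
typedef struct { float x, y, w, h; } box_sketch;

static float overlap_1d_sketch(float x1, float w1, float x2, float w2)
{
    // length of the intersection of two intervals given by center and width
    float l1 = x1 - w1 / 2, l2 = x2 - w2 / 2;
    float r1 = x1 + w1 / 2, r2 = x2 + w2 / 2;
    float left = l1 > l2 ? l1 : l2;
    float right = r1 < r2 ? r1 : r2;
    return right - left;
}

static float box_iou_sketch(box_sketch a, box_sketch b)
{
    float w = overlap_1d_sketch(a.x, a.w, b.x, b.w);
    float h = overlap_1d_sketch(a.y, a.h, b.y, b.h);
    if (w <= 0 || h <= 0) return 0;                  // disjoint boxes
    float inter = w * h;
    float uni = a.w * a.h + b.w * b.h - inter;       // inclusion-exclusion
    return inter / uni;
}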
ext_kernels.h
#pragma once

#include <stdbool.h>

void initialise_device_memory(void);

#pragma omp declare target
void zero_edge_flux_buffers(void);
#pragma omp end declare target

void zero_flux_moments_buffer(void);
void zero_flux_in_out(void);
void zero_scalar_flux(void);
void calc_inner_source(void);
void calc_outer_source(void);
void calc_scattering_cross_section(void);
void calc_dd_coefficients(void);
void calc_time_delta(void);
void calc_denominator(void);
void calc_total_cross_section(void);
void store_scalar_flux(double* to);
bool check_convergence(
        double *old,
        double *new,
        double epsi,
        unsigned int *groups_todo,
        unsigned int *num_groups_todo,
        bool inner);
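// Illustrative sketch of how a solver loop might drive the interface above.
// The loop structure, buffer names, the use of store_scalar_flux() to carry
// the new flux into the old buffer, and the in/out meaning of groups_todo /
// num_groups_todo are assumptions for illustration, not part of this header.
void inner_iterate_sketch(double *phi_old, double *phi_new, double epsi,
                          unsigned int *groups_todo, unsigned int *num_groups_todo,
                          int max_inners)
{
    for (int it = 0; it < max_inners; ++it) {
        // ... sweep kernels would update phi_new here ...
        // assumed contract: returns true when every group still listed in
        // groups_todo meets the epsi tolerance, shrinking num_groups_todo
        // as groups converge
        if (check_convergence(phi_old, phi_new, epsi,
                              groups_todo, num_groups_todo, true))
            break;
        store_scalar_flux(phi_old);    // assumed: save current flux for the next test
    }
}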
GB_unaryop__abs_fp64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_uint16
// op(A') function:  GB_tran__abs_fp64_uint16

// C type:   double
// A type:   uint16_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_fp64_uint16
(
    double *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_fp64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
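// For readers tracing the macros: inside GB_unop__abs_fp64_uint16, the
// GB_CAST_OP(p,p) expansion amounts to the plain loop below (standalone
// sketch with an illustrative name, not generated code).
#include <stdint.h>
#include <math.h>

void unop_abs_fp64_uint16_sketch(double *Cx, const uint16_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;        // GB_GETA
        double x = (double) aij ;      // GB_CASTING
        Cx [p] = fabs (x) ;            // GB_OP into GB_CX(p)
    }
}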
quick_gemm.h
  // Quick gemm if alpha==0.0
  if ( alpha==zero ) {
    if ( beta==zero ) {
      if ( orderC=='C' ) {
        #pragma omp parallel for
        for ( int jc=0; jc<n; jc++ )
          for ( int ic=0; ic<m; ic++ )
            Ccol(ic,jc) = 0.0;
      }
      else {
        #pragma omp parallel for
        for ( int jc=0; jc<n; jc++ )
          for ( int ic=0; ic<m; ic++ )
            Crow(ic,jc) = 0.0;
      }
    }
    else if ( orderC=='C' ) {
      #pragma omp parallel for
      for ( int jc=0; jc<n; jc++ )
        for ( int ic=0; ic<m; ic++ )
          Ccol(ic,jc) = beta*Ccol(ic,jc);
    }
    else {
      #pragma omp parallel for
      for ( int jc=0; jc<n; jc++ )
        for ( int ic=0; ic<m; ic++ )
          Crow(ic,jc) = beta*Crow(ic,jc);
    }
    return;
  }
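// Context for the branch structure above (illustrative only, with the Ccol
// macro spelled out as explicit column-major indexing; the function name and
// layout assumption are mine): BLAS gemm computes C = alpha*A*B + beta*C, so
// alpha == 0 reduces the whole call to a scaling of C, and beta == 0 on top
// of that reduces it to zeroing C.
static void gemm_quick_return_sketch(int m, int n, double alpha, double beta,
                                     double *C, int ldC)
{
    if (alpha != 0.0) return;    // quick return only applies when alpha == 0
    for (int jc = 0; jc < n; jc++)
        for (int ic = 0; ic < m; ic++)
            C[jc * ldC + ic] = (beta == 0.0) ? 0.0 : beta * C[jc * ldC + ic];
}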
expected_output.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>

//---------------------------------------------------------------------
// program SP
//---------------------------------------------------------------------

//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
//----------
// Class E:
//----------

struct anon_NAS_SP_c_78 {
    double real;
    double imag;
};
typedef struct anon_NAS_SP_c_78 dcomplex;

/*common /global/*/
int grid_points[3];
int nx2;
int ny2;
int nz2;

/*common /constants/*/
double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
double dx1, dx2, dx3, dx4, dx5;
double dy1, dy2, dy3, dy4, dy5;
double dz1, dz2, dz3, dz4, dz5;
double dssp, dt;
double ce[5][13];
double dxmax, dymax, dzmax;
double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
double yycon1, yycon2, yycon3, yycon4, yycon5;
double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
double dnxm1, dnym1, dnzm1;
double c1c2, c1c5, c3c4, c1345, conz1;
double c1, c2, c3, c4, c5;
double c4dssp, c5dssp, dtdssp;
double dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2;
double c2dttx1, c2dtty1, c2dttz1;
double comz1, comz4, comz5, comz6;
double c3c4tx3, c3c4ty3, c3c4tz3;
double c2iv, con43, con16;

//---------------------------------------------------------------------
// To improve cache performance, grid dimensions padded by 1
// for even number sizes only
//---------------------------------------------------------------------
/*common /fields/*/
double u[36][37][37][5];
double us[36][37][37];
double vs[36][37][37];
double ws[36][37][37];
double qs[36][37][37];
double rho_i[36][37][37];
double speed[36][37][37];
double square[36][37][37];
double rhs[36][37][37][5];
double forcing[36][37][37][5];

//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------

//-----------------------------------------------------------------------
void initialize();
void lhsinit(int ni, int nj, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]);
void lhsinitj(int nj, int ni, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void ninvr();
void y_solve();
void pinvr();
void z_solve();
void tzetar();
void add();
void txinvr();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *Class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);

double start[64];
double elapsed[64];
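/*
 * Illustrative sketch, not part of the benchmark: the pragmas inserted below
 * use OpenMP 4.5 array-section reductions such as "reduction(+ : rms[:5])".
 * Each thread accumulates into a private 5-element copy of rms, and the
 * copies are summed element-wise when the loop ends, as in this minimal
 * example (the function name and dummy update are placeholders):
 */
static void array_reduction_sketch(double rms_out[5], int nk)
{
    double rms[5] = { 0.0, 0.0, 0.0, 0.0, 0.0 };
    #pragma omp parallel for reduction(+ : rms[:5])
    for (int k = 0; k < nk; k++) {
        for (int m = 0; m < 5; m++) {
            rms[m] += (double)k + m;    /* stand-in for the add*add updates */
        }
    }
    for (int m = 0; m < 5; m++) {
        rms_out[m] = rms[m];
    }
}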
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);

int main(int argc, char *argv[])
{
    int i, niter, step, n3;
    double mflops;
    double t;
    double tmax;
    double trecs[16];
    int verified;
    char Class;
    char *t_names[16];

    printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - SP Benchmark\n\n");
    niter = 400;
    dt = 0.0015;
    grid_points[0] = 36;
    grid_points[1] = 36;
    grid_points[2] = 36;
    printf(" Size: %4dx%4dx%4d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
    printf("\n");

    if ((grid_points[0] > 36) || (grid_points[1] > 36) || (grid_points[2] > 36)) {
        printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        return 0;
    }
    nx2 = grid_points[0] - 2;
    ny2 = grid_points[1] - 2;
    nz2 = grid_points[2] - 2;

    set_constants();

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (i = 1; i <= 15; i++) {
        timer_clear(i);
    }

    exact_rhs();
    initialize();

    //---------------------------------------------------------------------
    // do one time step to touch all code, and reinitialize
    //---------------------------------------------------------------------
    adi();
    initialize();

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (i = 1; i <= 15; i++) {
        timer_clear(i);
    }
    timer_start(1);

    /*************** Clava msgError **************
    Variables Access as passed arguments Can not be traced inside of function calls :
    printf#267{printf(" Time step %4d\n", step)}
    compute_rhs#270{compute_rhs()}
    txinvr#271{txinvr()}
    x_solve#272{x_solve()}
    y_solve#273{y_solve()}
    z_solve#274{z_solve()}
    add#275{add()}
    ****************************************/
    for (step = 1; step <= niter; step++) {
        if ((step % 20) == 0 || step == 1) {
            printf(" Time step %4d\n", step);
        }
        adi();
    }
    timer_stop(1);
    tmax = timer_read(1);

    verify(niter, &Class, &verified);

    if (tmax != 0.0) {
        n3 = grid_points[0] * grid_points[1] * grid_points[2];
        t = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
        mflops = (881.174 * (double)n3 - 4683.91 * (t * t) + 11484.5 * t - 19272.4) * (double)niter / (tmax * 1000000.0);
    }
    else {
        mflops = 0.0;
    }

    print_results("SP", Class, grid_points[0], grid_points[1], grid_points[2], niter, tmax, mflops, " floating point", verified);

    int exitValue = verified ? 0 : 1;
    return exitValue;
}

//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
void error_norm(double rms[5])
{
    int i, j, k, m, d;
    double xi;
    double eta;
    double zeta;
    double u_exact[5];
    double add;

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    #pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
    for (k = 0; k <= grid_points[2] - 1; k++) {
        zeta = (double)k * dnzm1;
        // #pragma omp parallel for default(shared) private(j, i, m, eta, xi, add) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
        for (j = 0; j <= grid_points[1] - 1; j++) {
            eta = (double)j * dnym1;
            // #pragma omp parallel for default(shared) private(i, m, xi, add) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
            for (i = 0; i <= grid_points[0] - 1; i++) {
                xi = (double)i * dnxm1;
                exact_solution(xi, eta, zeta, u_exact);
                /*************** Clava msgError **************
                Loop Iteration number is too low
                ****************************************/
                for (m = 0; m < 5; m++) {
                    add = u[k][j][i][m] - u_exact[m];
                    rms[m] = rms[m] + add * add;
                }
            }
        }
    }

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (m = 0; m < 5; m++) {
        /*************** Clava msgError **************
        Loop Iteration number is too low
        ****************************************/
        for (d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double)(grid_points[d] - 2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

void rhs_norm(double rms[5])
{
    int i, j, k, d, m;
    double add;

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    #pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(nz2, ny2, nx2, rhs) reduction(+ : rms[:5])
    for (k = 1; k <= nz2; k++) {
        // #pragma omp parallel for default(shared) private(j, i, m, add) firstprivate(ny2, nx2, k, rhs) reduction(+ : rms[:5])
        for (j = 1; j <= ny2; j++) {
            // #pragma omp parallel for default(shared) private(i, m, add) firstprivate(nx2, k, j, rhs) reduction(+ : rms[:5])
            for (i = 1; i <= nx2; i++) {
                /*************** Clava msgError **************
                Loop Iteration number is too low
                ****************************************/
                for (m = 0; m < 5; m++) {
                    add = rhs[k][j][i][m];
                    rms[m] = rms[m] + add * add;
                }
            }
        }
    }

    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for (m = 0; m < 5; m++) {
        /*************** Clava msgError **************
        Loop Iteration number is too low
        ****************************************/
        for (d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double)(grid_points[d] - 2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs()
{
    double dtemp[5];
    double xi;
    double eta;
    double zeta;
    double dtpp;
    int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
    double ue[36][5];
    double buf[36][5];
    double q[36];
    double cuf[36];
//--------------------------------------------------------------------- // initialize //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points) for(k = 0; k <= grid_points[2] - 1; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points) for(j = 0; j <= grid_points[1] - 1; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points) for(i = 0; i <= grid_points[0] - 1; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { forcing[k][j][i][m] = 0.0; } } } } //--------------------------------------------------------------------- // xi-direction flux differences //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(j, i, m, eta, xi, dtpp, im1, ip1) firstprivate(dnym1, dnxm1, zeta, tx2, k, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; // #pragma omp parallel for default(shared) private(i, m, xi, dtpp) firstprivate(dnxm1, zeta, eta, grid_points, ce, dtemp) for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, dtemp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { ue[i][m] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 1; m < 5; m++) { buf[i][m] = dtpp * dtemp[m]; } cuf[i] = buf[i][1] * buf[i][1]; buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3]; q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]); } // #pragma omp parallel for default(shared) private(i, im1, ip1) firstprivate(tx2, k, j, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, grid_points, ue, q, buf, cuf) for(i = 1; i <= grid_points[0] - 2; i++) { im1 = i - 1; ip1 = i + 1; forcing[k][j][i][0] = forcing[k][j][i][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]); forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * ((ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]); forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]); forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]); forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (buf[ip1][1] * (c1 * 
ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]); } //--------------------------------------------------------------------- // Fourth-order dissipation //--------------------------------------------------------------------- /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { i = 1; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]); i = 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]); } // #pragma omp parallel for default(shared) private(i, m) firstprivate(dssp, k, j, grid_points, ue) for(i = 3; i <= grid_points[0] - 4; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]); } } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { i = grid_points[0] - 3; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]); i = grid_points[0] - 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]); } } } //--------------------------------------------------------------------- // eta-direction flux differences //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(i, j, m, xi, eta, dtpp, jm1, jp1) firstprivate(dnxm1, dnym1, zeta, ty2, k, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(i = 1; i <= grid_points[0] - 2; i++) { xi = (double) i * dnxm1; // #pragma omp parallel for default(shared) private(j, m, eta, dtpp) firstprivate(dnym1, zeta, xi, grid_points, ce, dtemp) for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, dtemp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { ue[j][m] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 1; m < 5; m++) { buf[j][m] = dtpp * dtemp[m]; } cuf[j] = buf[j][2] * buf[j][2]; buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3]; q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]); } // #pragma omp parallel for default(shared) private(j, jm1, jp1) firstprivate(ty2, k, i, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, grid_points, 
ue, buf, q, cuf) for(j = 1; j <= grid_points[1] - 2; j++) { jm1 = j - 1; jp1 = j + 1; forcing[k][j][i][0] = forcing[k][j][i][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]); forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]); forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * ((ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]); forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]); forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]); } //--------------------------------------------------------------------- // Fourth-order dissipation //--------------------------------------------------------------------- /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { j = 1; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]); j = 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]); } // #pragma omp parallel for default(shared) private(j, m) firstprivate(dssp, k, i, grid_points, ue) for(j = 3; j <= grid_points[1] - 4; j++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]); } } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { j = grid_points[1] - 3; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]); j = grid_points[1] - 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]); } } } //--------------------------------------------------------------------- // zeta-direction flux differences //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; // #pragma omp parallel for default(shared) private(i, k, m, xi, zeta, dtpp, km1, kp1) firstprivate(dnxm1, dnzm1, eta, tz2, j, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q) for(i = 1; i <= grid_points[0] - 2; i++) { xi 
= (double) i * dnxm1; // #pragma omp parallel for default(shared) private(k, m, zeta, dtpp) firstprivate(dnzm1, eta, xi, grid_points, ce, dtemp) for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, dtemp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { ue[k][m] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 1; m < 5; m++) { buf[k][m] = dtpp * dtemp[m]; } cuf[k] = buf[k][3] * buf[k][3]; buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2]; q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]); } // #pragma omp parallel for default(shared) private(k, km1, kp1) firstprivate(tz2, j, i, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, grid_points, ue, buf, q, cuf) for(k = 1; k <= grid_points[2] - 2; k++) { km1 = k - 1; kp1 = k + 1; forcing[k][j][i][0] = forcing[k][j][i][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]); forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]); forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]); forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * ((ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]); forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]); } //--------------------------------------------------------------------- // Fourth-order dissipation //--------------------------------------------------------------------- /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { k = 1; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]); k = 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]); } // #pragma omp parallel for default(shared) private(k, m) firstprivate(dssp, j, i, grid_points, ue) for(k = 3; k <= grid_points[2] - 4; k++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]); } } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { k = grid_points[2] - 3; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 
1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]); k = grid_points[2] - 2; forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]); } } } //--------------------------------------------------------------------- // now change the sign of the forcing function, //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points) for(k = 1; k <= grid_points[2] - 2; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points) for(j = 1; j <= grid_points[1] - 2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points) for(i = 1; i <= grid_points[0] - 2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m]; } } } } } //--------------------------------------------------------------------- // this function returns the exact solution at point xi, eta, zeta //--------------------------------------------------------------------- void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { int m; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { dtemp[m] = ce[m][0] + xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) + eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) + zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12]))); } } void adi() { compute_rhs(); txinvr(); x_solve(); y_solve(); z_solve(); add(); } //--------------------------------------------------------------------- // addition of update to the vector u //--------------------------------------------------------------------- void add() { int i, j, k, m; #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, rhs) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, rhs) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, rhs) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m]; } } } } } //--------------------------------------------------------------------- // This subroutine initializes the field variable u using // tri-linear transfinite interpolation of the boundary values //--------------------------------------------------------------------- void initialize() { int i, j, k, m, ix, iy, iz; double xi; double eta; double zeta; double Pface[2][3][5]; double Pxi; double Peta; double Pzeta; double temp[5]; //--------------------------------------------------------------------- // Later (in compute_rhs) we compute 1/u for every element. A few of // the corner elements are not used, but it convenient (and faster) // to compute the whole thing with a simple loop. Make sure those // values are nonzero by initializing the whole thing here. 
//--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i) firstprivate(grid_points) for(k = 0; k <= grid_points[2] - 1; k++) { // #pragma omp parallel for default(shared) private(j, i) firstprivate(k, grid_points) for(j = 0; j <= grid_points[1] - 1; j++) { // #pragma omp parallel for default(shared) private(i) firstprivate(k, j, grid_points) for(i = 0; i <= grid_points[0] - 1; i++) { u[k][j][i][0] = 1.0; u[k][j][i][1] = 0.0; u[k][j][i][2] = 0.0; u[k][j][i][3] = 0.0; u[k][j][i][4] = 1.0; } } } //--------------------------------------------------------------------- // first store the "interpolated" values everywhere on the grid //--------------------------------------------------------------------- /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])} exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])} exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])} ****************************************/ for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])} exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])} exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])} ****************************************/ for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])} exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])} exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])} ****************************************/ for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(ix = 0; ix < 2; ix++) { Pxi = (double) ix; exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]); } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(iy = 0; iy < 2; iy++) { Peta = (double) iy; exact_solution(xi, Peta, zeta, &Pface[iy][1][0]); } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(iz = 0; iz < 2; iz++) { Pzeta = (double) iz; exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]); } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m]; u[k][j][i][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta; } } } } //--------------------------------------------------------------------- // now store the exact values on the boundaries //--------------------------------------------------------------------- //--------------------------------------------------------------------- // west face 
//--------------------------------------------------------------------- xi = 0.0; i = 0; #pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp) for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp) for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // east face //--------------------------------------------------------------------- xi = 1.0; i = grid_points[0] - 1; #pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp) for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp) for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // south face //--------------------------------------------------------------------- eta = 0.0; j = 0; #pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp) for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp) for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // north face //--------------------------------------------------------------------- eta = 1.0; j = grid_points[1] - 1; #pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp) for(k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; // #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp) for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // bottom face //--------------------------------------------------------------------- zeta = 0.0; k = 0; #pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp) for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; // #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp) 
for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } //--------------------------------------------------------------------- // top face //--------------------------------------------------------------------- zeta = 1.0; k = grid_points[2] - 1; #pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp) for(j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; // #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp) for(i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, temp); /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { u[k][j][i][m] = temp[m]; } } } } void lhsinit(int ni, int nj, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]) { int j, m; //--------------------------------------------------------------------- // zap the whole left hand side for starters // set all diagonal values to 1. This is overkill, but convenient //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j, m) firstprivate(nj, ni) for(j = 1; j <= nj; j++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { lhs[j][0][m] = 0.0; lhsp[j][0][m] = 0.0; lhsm[j][0][m] = 0.0; lhs[j][ni][m] = 0.0; lhsp[j][ni][m] = 0.0; lhsm[j][ni][m] = 0.0; } lhs[j][0][2] = 1.0; lhsp[j][0][2] = 1.0; lhsm[j][0][2] = 1.0; lhs[j][ni][2] = 1.0; lhsp[j][ni][2] = 1.0; lhsm[j][ni][2] = 1.0; } } void lhsinitj(int nj, int ni, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]) { int i, m; //--------------------------------------------------------------------- // zap the whole left hand side for starters // set all diagonal values to 1. 
This is overkill, but convenient //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(i, m) firstprivate(ni, nj) for(i = 1; i <= ni; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { lhs[0][i][m] = 0.0; lhsp[0][i][m] = 0.0; lhsm[0][i][m] = 0.0; lhs[nj][i][m] = 0.0; lhsp[nj][i][m] = 0.0; lhsm[nj][i][m] = 0.0; } lhs[0][i][2] = 1.0; lhsp[0][i][2] = 1.0; lhsm[0][i][2] = 1.0; lhs[nj][i][2] = 1.0; lhsp[nj][i][2] = 1.0; lhsm[nj][i][2] = 1.0; } } //--------------------------------------------------------------------- // block-diagonal matrix-vector multiplication //--------------------------------------------------------------------- void ninvr() { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp parallel for default(shared) private(k, j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nz2, ny2, nx2, bt) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(ny2, nx2, k, bt) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nx2, k, j, bt) for(i = 1; i <= nx2; i++) { r1 = rhs[k][j][i][0]; r2 = rhs[k][j][i][1]; r3 = rhs[k][j][i][2]; r4 = rhs[k][j][i][3]; r5 = rhs[k][j][i][4]; t1 = bt * r3; t2 = 0.5 * (r4 + r5); rhs[k][j][i][0] = -r2; rhs[k][j][i][1] = r1; rhs[k][j][i][2] = bt * (r4 - r5); rhs[k][j][i][3] = -t1 + t2; rhs[k][j][i][4] = t1 + t2; } } } } //--------------------------------------------------------------------- // block-diagonal matrix-vector multiplication //--------------------------------------------------------------------- void pinvr() { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp parallel for default(shared) private(k, j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nz2, ny2, nx2, bt) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(ny2, nx2, k, bt) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nx2, k, j, bt) for(i = 1; i <= nx2; i++) { r1 = rhs[k][j][i][0]; r2 = rhs[k][j][i][1]; r3 = rhs[k][j][i][2]; r4 = rhs[k][j][i][3]; r5 = rhs[k][j][i][4]; t1 = bt * r1; t2 = 0.5 * (r4 + r5); rhs[k][j][i][0] = bt * (r4 - r5); rhs[k][j][i][1] = -r3; rhs[k][j][i][2] = r2; rhs[k][j][i][3] = -t1 + t2; rhs[k][j][i][4] = t1 + t2; } } } } void compute_rhs() { int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; //--------------------------------------------------------------------- // compute the reciprocal of density, and the kinetic energy, // and the speed of sound. 
//--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, rho_inv, aux) firstprivate(c1c2, grid_points, u) for(k = 0; k <= grid_points[2] - 1; k++) { // #pragma omp parallel for default(shared) private(j, i, rho_inv, aux) firstprivate(k, c1c2, grid_points, u) for(j = 0; j <= grid_points[1] - 1; j++) { // #pragma omp parallel for default(shared) private(i, rho_inv, aux) firstprivate(k, j, c1c2, grid_points, u) for(i = 0; i <= grid_points[0] - 1; i++) { rho_inv = 1.0 / u[k][j][i][0]; rho_i[k][j][i] = rho_inv; us[k][j][i] = u[k][j][i][1] * rho_inv; vs[k][j][i] = u[k][j][i][2] * rho_inv; ws[k][j][i] = u[k][j][i][3] * rho_inv; square[k][j][i] = 0.5 * (u[k][j][i][1] * u[k][j][i][1] + u[k][j][i][2] * u[k][j][i][2] + u[k][j][i][3] * u[k][j][i][3]) * rho_inv; qs[k][j][i] = square[k][j][i] * rho_inv; //------------------------------------------------------------------- // (don't need speed and ainx until the lhs computation) //------------------------------------------------------------------- aux = c1c2 * rho_inv * (u[k][j][i][4] - square[k][j][i]); speed[k][j][i] = sqrt(aux); } } } //--------------------------------------------------------------------- // copy the exact forcing term to the right hand side; because // this forcing term is known, we can store it on the whole grid // including the boundary //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing) for(k = 0; k <= grid_points[2] - 1; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points, forcing) for(j = 0; j <= grid_points[1] - 1; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points, forcing) for(i = 0; i <= grid_points[0] - 1; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = forcing[k][j][i][m]; } } } } //--------------------------------------------------------------------- // compute xi-direction fluxes //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(nz2, ny2, nx2, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, us, u, square, vs, ws, qs, rho_i) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, uijk, up1, um1) firstprivate(ny2, nx2, k, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, us, u, square, vs, ws, qs, rho_i) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, uijk, up1, um1) firstprivate(nx2, k, j, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, us, u, square, vs, ws, qs, rho_i) for(i = 1; i <= nx2; i++) { uijk = us[k][j][i]; up1 = us[k][j][i + 1]; um1 = us[k][j][i - 1]; rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] + u[k][j][i - 1][0]) - tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]); rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] + u[k][j][i - 1][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[k][j][i + 1][1] * up1 - u[k][j][i - 1][1] * um1 + (u[k][j][i + 1][4] - square[k][j][i + 1] - u[k][j][i - 1][4] + square[k][j][i - 1]) * c2); 
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] + u[k][j][i - 1][2]) + xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] + vs[k][j][i - 1]) - tx2 * (u[k][j][i + 1][2] * up1 - u[k][j][i - 1][2] * um1); rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] + u[k][j][i - 1][3]) + xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] + ws[k][j][i - 1]) - tx2 * (u[k][j][i + 1][3] * up1 - u[k][j][i - 1][3] * um1); rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] + u[k][j][i - 1][4]) + xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] + qs[k][j][i - 1]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j][i - 1][4] * rho_i[k][j][i - 1]) - tx2 * ((c1 * u[k][j][i + 1][4] - c2 * square[k][j][i + 1]) * up1 - (c1 * u[k][j][i - 1][4] - c2 * square[k][j][i - 1]) * um1); } } //--------------------------------------------------------------------- // add fourth order xi-direction dissipation //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(j, m, i) firstprivate(ny2, k, dssp, u) for(j = 1; j <= ny2; j++) { i = 1; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]); } i = 2; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]); } } // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 3; i <= nx2 - 2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]); } } } // #pragma omp parallel for default(shared) private(j, m, i) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { i = nx2 - 1; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m]); } i = nx2; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 5.0 * u[k][j][i][m]); } } } //--------------------------------------------------------------------- // compute eta-direction fluxes //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(nz2, ny2, nx2, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, vs, u, us, square, ws, qs, rho_i) for(k = 1; k <= nz2; k++) { // #pragma omp 
parallel for default(shared) private(j, i, vijk, vp1, vm1) firstprivate(ny2, nx2, k, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, vs, u, us, square, ws, qs, rho_i) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, vijk, vp1, vm1) firstprivate(nx2, k, j, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, vs, u, us, square, ws, qs, rho_i) for(i = 1; i <= nx2; i++) { vijk = vs[k][j][i]; vp1 = vs[k][j + 1][i]; vm1 = vs[k][j - 1][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] + u[k][j - 1][i][0]) - ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]); rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] + u[k][j - 1][i][1]) + yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] + us[k][j - 1][i]) - ty2 * (u[k][j + 1][i][1] * vp1 - u[k][j - 1][i][1] * vm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] + u[k][j - 1][i][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[k][j + 1][i][2] * vp1 - u[k][j - 1][i][2] * vm1 + (u[k][j + 1][i][4] - square[k][j + 1][i] - u[k][j - 1][i][4] + square[k][j - 1][i]) * c2); rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] + u[k][j - 1][i][3]) + yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] + ws[k][j - 1][i]) - ty2 * (u[k][j + 1][i][3] * vp1 - u[k][j - 1][i][3] * vm1); rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] + u[k][j - 1][i][4]) + yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] + qs[k][j - 1][i]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j - 1][i][4] * rho_i[k][j - 1][i]) - ty2 * ((c1 * u[k][j + 1][i][4] - c2 * square[k][j + 1][i]) * vp1 - (c1 * u[k][j - 1][i][4] - c2 * square[k][j - 1][i]) * vm1); } } //--------------------------------------------------------------------- // add fourth order eta-direction dissipation //--------------------------------------------------------------------- j = 1; // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]); } } j = 2; // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]); } } // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 3; j <= ny2 - 2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]); } } } j = ny2 - 1; // #pragma 
omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, ny2, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m]); } } j = ny2; // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, ny2, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 5.0 * u[k][j][i][m]); } } } //--------------------------------------------------------------------- // compute zeta-direction fluxes //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(nz2, ny2, nx2, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, wijk, wp1, wm1) firstprivate(ny2, nx2, k, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, wijk, wp1, wm1) firstprivate(nx2, k, j, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i) for(i = 1; i <= nx2; i++) { wijk = ws[k][j][i]; wp1 = ws[k + 1][j][i]; wm1 = ws[k - 1][j][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] + u[k - 1][j][i][0]) - tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]); rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] + u[k - 1][j][i][1]) + zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] + us[k - 1][j][i]) - tz2 * (u[k + 1][j][i][1] * wp1 - u[k - 1][j][i][1] * wm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] + u[k - 1][j][i][2]) + zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] + vs[k - 1][j][i]) - tz2 * (u[k + 1][j][i][2] * wp1 - u[k - 1][j][i][2] * wm1); rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] + u[k - 1][j][i][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[k + 1][j][i][3] * wp1 - u[k - 1][j][i][3] * wm1 + (u[k + 1][j][i][4] - square[k + 1][j][i] - u[k - 1][j][i][4] + square[k - 1][j][i]) * c2); rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] + u[k - 1][j][i][4]) + zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] + qs[k - 1][j][i]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k - 1][j][i][4] * rho_i[k - 1][j][i]) - tz2 * ((c1 * u[k + 1][j][i][4] - c2 * square[k + 1][j][i]) * wp1 - (c1 * u[k - 1][j][i][4] - c2 * square[k - 1][j][i]) * wm1); } } } //--------------------------------------------------------------------- // add fourth order zeta-direction dissipation //--------------------------------------------------------------------- k = 1; #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // 
#pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]); } } } k = 2; #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]); } } } #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, dssp, u) for(k = 3; k <= nz2 - 2; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]); } } } } k = nz2 - 1; #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m]); } } } k = nz2; #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 5.0 * u[k][j][i][m]); } } } #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, dt) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dt) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dt) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] * dt; } } } } } //--------------------------------------------------------------------- // block-diagonal matrix-vector multiplication //--------------------------------------------------------------------- void txinvr() { int i, j, k; double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv; #pragma omp parallel for default(shared) private(k, 
j, i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(nz2, ny2, nx2, c2, bt, rho_i, us, vs, ws, speed, qs) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(ny2, nx2, k, c2, bt, rho_i, us, vs, ws, speed, qs) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(nx2, k, j, c2, bt, rho_i, us, vs, ws, speed, qs) for(i = 1; i <= nx2; i++) { ru1 = rho_i[k][j][i]; uu = us[k][j][i]; vv = vs[k][j][i]; ww = ws[k][j][i]; ac = speed[k][j][i]; ac2inv = ac * ac; r1 = rhs[k][j][i][0]; r2 = rhs[k][j][i][1]; r3 = rhs[k][j][i][2]; r4 = rhs[k][j][i][3]; r5 = rhs[k][j][i][4]; t1 = c2 / ac2inv * (qs[k][j][i] * r1 - uu * r2 - vv * r3 - ww * r4 + r5); t2 = bt * ru1 * (uu * r1 - r2); t3 = (bt * ru1 * ac) * t1; rhs[k][j][i][0] = r1 - t1; rhs[k][j][i][1] = -ru1 * (ww * r1 - r4); rhs[k][j][i][2] = ru1 * (vv * r1 - r3); rhs[k][j][i][3] = -t2 + t3; rhs[k][j][i][4] = t2 + t3; } } } } //--------------------------------------------------------------------- // block-diagonal matrix-vector multiplication //--------------------------------------------------------------------- void tzetar() { int i, j, k; double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5; double btuz, ac2u, uzik1; #pragma omp parallel for default(shared) private(k, j, i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(nz2, ny2, nx2, bt, c2iv, us, vs, ws, speed, u, qs) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(j, i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(ny2, nx2, k, bt, c2iv, us, vs, ws, speed, u, qs) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(nx2, k, j, bt, c2iv, us, vs, ws, speed, u, qs) for(i = 1; i <= nx2; i++) { xvel = us[k][j][i]; yvel = vs[k][j][i]; zvel = ws[k][j][i]; ac = speed[k][j][i]; ac2u = ac * ac; r1 = rhs[k][j][i][0]; r2 = rhs[k][j][i][1]; r3 = rhs[k][j][i][2]; r4 = rhs[k][j][i][3]; r5 = rhs[k][j][i][4]; uzik1 = u[k][j][i][0]; btuz = bt * uzik1; t1 = btuz / ac * (r4 + r5); t2 = r3 + t1; t3 = btuz * (r4 - r5); rhs[k][j][i][0] = t2; rhs[k][j][i][1] = -uzik1 * r2 + xvel * t2; rhs[k][j][i][2] = uzik1 * r1 + yvel * t2; rhs[k][j][i][3] = zvel * t2 + t3; rhs[k][j][i][4] = uzik1 * (-xvel * r2 + yvel * r1) + qs[k][j][i] * t2 + c2iv * ac2u * t1 + zvel * t3; } } } } //--------------------------------------------------------------------- // verification routine //--------------------------------------------------------------------- void verify(int no_time_steps, char *Class, int *verified) { double xcrref[5]; double xceref[5]; double xcrdif[5]; double xcedif[5]; double epsilon; double xce[5]; double xcr[5]; double dtref = 0.0; int m; //--------------------------------------------------------------------- // tolerance level //--------------------------------------------------------------------- epsilon = 1.0e-08; //--------------------------------------------------------------------- // compute the error norm and the residual norm, and exit if not printing //--------------------------------------------------------------------- error_norm(xce); compute_rhs(); rhs_norm(xcr); /*************** Clava msgError ************** Loop Iteration number is too low 
****************************************/ for(m = 0; m < 5; m++) { xcr[m] = xcr[m] / dt; } *Class = 'U'; *verified = 1; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { xcrref[m] = 1.0; xceref[m] = 1.0; } //--------------------------------------------------------------------- // reference data for 12X12X12 grids after 100 time steps, // with DT = 1.50e-02 //--------------------------------------------------------------------- if((grid_points[0] == 12) && (grid_points[1] == 12) && (grid_points[2] == 12) && (no_time_steps == 100)) { *Class = 'S'; dtref = 1.5e-2; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 2.7470315451339479e-02; xcrref[1] = 1.0360746705285417e-02; xcrref[2] = 1.6235745065095532e-02; xcrref[3] = 1.5840557224455615e-02; xcrref[4] = 3.4849040609362460e-02; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. //--------------------------------------------------------------------- xceref[0] = 2.7289258557377227e-05; xceref[1] = 1.0364446640837285e-05; xceref[2] = 1.6154798287166471e-05; xceref[3] = 1.5750704994480102e-05; xceref[4] = 3.4177666183390531e-05; //--------------------------------------------------------------------- // reference data for 36X36X36 grids after 400 time steps, // with DT = 1.5e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 36) && (grid_points[1] == 36) && (grid_points[2] == 36) && (no_time_steps == 400)) { *Class = 'W'; dtref = 1.5e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 0.1893253733584e-02; xcrref[1] = 0.1717075447775e-03; xcrref[2] = 0.2778153350936e-03; xcrref[3] = 0.2887475409984e-03; xcrref[4] = 0.3143611161242e-02; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. //--------------------------------------------------------------------- xceref[0] = 0.7542088599534e-04; xceref[1] = 0.6512852253086e-05; xceref[2] = 0.1049092285688e-04; xceref[3] = 0.1128838671535e-04; xceref[4] = 0.1212845639773e-03; //--------------------------------------------------------------------- // reference data for 64X64X64 grids after 400 time steps, // with DT = 1.5e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 64) && (grid_points[1] == 64) && (grid_points[2] == 64) && (no_time_steps == 400)) { *Class = 'A'; dtref = 1.5e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 2.4799822399300195; xcrref[1] = 1.1276337964368832; xcrref[2] = 1.5028977888770491; xcrref[3] = 1.4217816211695179; xcrref[4] = 2.1292113035138280; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. 
//--------------------------------------------------------------------- xceref[0] = 1.0900140297820550e-04; xceref[1] = 3.7343951769282091e-05; xceref[2] = 5.0092785406541633e-05; xceref[3] = 4.7671093939528255e-05; xceref[4] = 1.3621613399213001e-04; //--------------------------------------------------------------------- // reference data for 102X102X102 grids after 400 time steps, // with DT = 1.0e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 102) && (grid_points[1] == 102) && (grid_points[2] == 102) && (no_time_steps == 400)) { *Class = 'B'; dtref = 1.0e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 0.6903293579998e+02; xcrref[1] = 0.3095134488084e+02; xcrref[2] = 0.4103336647017e+02; xcrref[3] = 0.3864769009604e+02; xcrref[4] = 0.5643482272596e+02; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. //--------------------------------------------------------------------- xceref[0] = 0.9810006190188e-02; xceref[1] = 0.1022827905670e-02; xceref[2] = 0.1720597911692e-02; xceref[3] = 0.1694479428231e-02; xceref[4] = 0.1847456263981e-01; //--------------------------------------------------------------------- // reference data for 162X162X162 grids after 400 time steps, // with DT = 0.67e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 162) && (grid_points[1] == 162) && (grid_points[2] == 162) && (no_time_steps == 400)) { *Class = 'C'; dtref = 0.67e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 0.5881691581829e+03; xcrref[1] = 0.2454417603569e+03; xcrref[2] = 0.3293829191851e+03; xcrref[3] = 0.3081924971891e+03; xcrref[4] = 0.4597223799176e+03; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. //--------------------------------------------------------------------- xceref[0] = 0.2598120500183e+00; xceref[1] = 0.2590888922315e-01; xceref[2] = 0.5132886416320e-01; xceref[3] = 0.4806073419454e-01; xceref[4] = 0.5483377491301e+00; //--------------------------------------------------------------------- // reference data for 408X408X408 grids after 500 time steps, // with DT = 0.3e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 408) && (grid_points[1] == 408) && (grid_points[2] == 408) && (no_time_steps == 500)) { *Class = 'D'; dtref = 0.30e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 0.1044696216887e+05; xcrref[1] = 0.3204427762578e+04; xcrref[2] = 0.4648680733032e+04; xcrref[3] = 0.4238923283697e+04; xcrref[4] = 0.7588412036136e+04; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. 
//--------------------------------------------------------------------- xceref[0] = 0.5089471423669e+01; xceref[1] = 0.5323514855894e+00; xceref[2] = 0.1187051008971e+01; xceref[3] = 0.1083734951938e+01; xceref[4] = 0.1164108338568e+02; //--------------------------------------------------------------------- // reference data for 1020X1020X1020 grids after 500 time steps, // with DT = 0.1e-03 //--------------------------------------------------------------------- } else if((grid_points[0] == 1020) && (grid_points[1] == 1020) && (grid_points[2] == 1020) && (no_time_steps == 500)) { *Class = 'E'; dtref = 0.10e-3; //--------------------------------------------------------------------- // Reference values of RMS-norms of residual. //--------------------------------------------------------------------- xcrref[0] = 0.6255387422609e+05; xcrref[1] = 0.1495317020012e+05; xcrref[2] = 0.2347595750586e+05; xcrref[3] = 0.2091099783534e+05; xcrref[4] = 0.4770412841218e+05; //--------------------------------------------------------------------- // Reference values of RMS-norms of solution error. //--------------------------------------------------------------------- xceref[0] = 0.6742735164909e+02; xceref[1] = 0.5390656036938e+01; xceref[2] = 0.1680647196477e+02; xceref[3] = 0.1536963126457e+02; xceref[4] = 0.1575330146156e+03; } else { *verified = 0; } //--------------------------------------------------------------------- // verification test for residuals if gridsize is one of // the defined grid sizes above (class .ne. 'U') //--------------------------------------------------------------------- //--------------------------------------------------------------------- // Compute the difference of solution values and the known reference values. //--------------------------------------------------------------------- /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]); xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]); } //--------------------------------------------------------------------- // Output the comparison of computed results to known cases. 
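//---------------------------------------------------------------------
// (worked example) with epsilon = 1.0e-08, the relative-difference test
// computed above, xcrdif[m] = |xcr[m] - xcrref[m]| / xcrref[m], only
// passes when a computed norm agrees with its reference value to about
// eight significant digits; dt, by contrast, is compared against dtref
// below with an absolute tolerance, |dt - dtref| <= epsilon.
//---------------------------------------------------------------------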
//--------------------------------------------------------------------- if(*Class != 'U') { printf(" Verification being performed for class %c\n", *Class); printf(" accuracy setting for epsilon = %20.13E\n", epsilon); *verified = (fabs(dt - dtref) <= epsilon); if(!(*verified)) { *Class = 'U'; printf(" DT does not match the reference value of %15.8E\n", dtref); } } else { printf(" Unknown class\n"); } if(*Class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { if(*Class == 'U') { printf(" %2d%20.13E\n", m + 1, xcr[m]); } else if(xcrdif[m] <= epsilon) { printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]); } else { *verified = 0; printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]); } } if(*Class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 5; m++) { if(*Class == 'U') { printf(" %2d%20.13E\n", m + 1, xce[m]); } else if(xcedif[m] <= epsilon) { printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]); } else { *verified = 0; printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]); } } if(*Class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if(*verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } } //--------------------------------------------------------------------- // this function performs the solution of the approximate factorization // step in the x-direction for all five matrix components // simultaneously. The Thomas algorithm is employed to solve the // systems for the x-lines. Boundary conditions are non-periodic //--------------------------------------------------------------------- void x_solve() { int i, j, k, i1, i2, m; double ru1, fac1, fac2; double rhon[36]; double cv[36]; double lhs[37][37][5]; double lhsp[37][37][5]; double lhsm[37][37][5]; #pragma omp parallel for default(shared) private(k, j, i, m, ru1, i1, i2, fac1, fac2) firstprivate(nz2, ny2, nx2, c3c4, con43, c1c5, dx2, dx5, dxmax, dx1, dttx2, dttx1, c2dttx1, comz5, comz4, comz1, comz6, grid_points, rho_i, us, speed, lhs, lhsp, lhsm, cv, rhon) for(k = 1; k <= nz2; k++) { lhsinit(nx2 + 1, ny2, lhs, lhsp, lhsm); //--------------------------------------------------------------------- // Computes the left hand side for the three x-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(j, i, ru1) firstprivate(ny2, c3c4, k, con43, c1c5, dx2, dx5, dxmax, dx1, nx2, dttx2, dttx1, c2dttx1, grid_points, rho_i, us, cv, rhon) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i, ru1) firstprivate(c3c4, k, j, con43, c1c5, dx2, dx5, dxmax, dx1, grid_points, rho_i, us) for(i = 0; i <= grid_points[0] - 1; i++) { ru1 = c3c4 * rho_i[k][j][i]; cv[i] = us[k][j][i]; rhon[i] = ((((dx2 + con43 * ru1) > (dx5 + c1c5 * ru1) ? 
(dx2 + con43 * ru1) : (dx5 + c1c5 * ru1))) > (((dxmax + ru1) > (dx1) ? (dxmax + ru1) : (dx1))) ? (((dx2 + con43 * ru1) > (dx5 + c1c5 * ru1) ? (dx2 + con43 * ru1) : (dx5 + c1c5 * ru1))) : (((dxmax + ru1) > (dx1) ? (dxmax + ru1) : (dx1)))); } // #pragma omp parallel for default(shared) private(i) firstprivate(nx2, j, dttx2, dttx1, c2dttx1, cv, rhon) for(i = 1; i <= nx2; i++) { lhs[j][i][0] = 0.0; lhs[j][i][1] = -dttx2 * cv[i - 1] - dttx1 * rhon[i - 1]; lhs[j][i][2] = 1.0 + c2dttx1 * rhon[i]; lhs[j][i][3] = dttx2 * cv[i + 1] - dttx1 * rhon[i + 1]; lhs[j][i][4] = 0.0; } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz5, comz4, comz1, comz6) for(j = 1; j <= ny2; j++) { i = 1; lhs[j][i][2] = lhs[j][i][2] + comz5; lhs[j][i][3] = lhs[j][i][3] - comz4; lhs[j][i][4] = lhs[j][i][4] + comz1; lhs[j][i + 1][1] = lhs[j][i + 1][1] - comz4; lhs[j][i + 1][2] = lhs[j][i + 1][2] + comz6; lhs[j][i + 1][3] = lhs[j][i + 1][3] - comz4; lhs[j][i + 1][4] = lhs[j][i + 1][4] + comz1; } // #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz1, comz4, comz6, grid_points) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i) firstprivate(j, comz1, comz4, comz6, grid_points) for(i = 3; i <= grid_points[0] - 4; i++) { lhs[j][i][0] = lhs[j][i][0] + comz1; lhs[j][i][1] = lhs[j][i][1] - comz4; lhs[j][i][2] = lhs[j][i][2] + comz6; lhs[j][i][3] = lhs[j][i][3] - comz4; lhs[j][i][4] = lhs[j][i][4] + comz1; } } // #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz1, comz4, comz6, comz5, grid_points) for(j = 1; j <= ny2; j++) { i = grid_points[0] - 3; lhs[j][i][0] = lhs[j][i][0] + comz1; lhs[j][i][1] = lhs[j][i][1] - comz4; lhs[j][i][2] = lhs[j][i][2] + comz6; lhs[j][i][3] = lhs[j][i][3] - comz4; lhs[j][i + 1][0] = lhs[j][i + 1][0] + comz1; lhs[j][i + 1][1] = lhs[j][i + 1][1] - comz4; lhs[j][i + 1][2] = lhs[j][i + 1][2] + comz5; } //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) by adding to // the first //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, nx2, dttx2, k, lhs, speed) for(j = 1; j <= ny2; j++) { // #pragma omp parallel for default(shared) private(i) firstprivate(nx2, j, dttx2, k, lhs, speed) for(i = 1; i <= nx2; i++) { lhsp[j][i][0] = lhs[j][i][0]; lhsp[j][i][1] = lhs[j][i][1] - dttx2 * speed[k][j][i - 1]; lhsp[j][i][2] = lhs[j][i][2]; lhsp[j][i][3] = lhs[j][i][3] + dttx2 * speed[k][j][i + 1]; lhsp[j][i][4] = lhs[j][i][4]; lhsm[j][i][0] = lhs[j][i][0]; lhsm[j][i][1] = lhs[j][i][1] + dttx2 * speed[k][j][i - 1]; lhsm[j][i][2] = lhs[j][i][2]; lhsm[j][i][3] = lhs[j][i][3] - dttx2 * speed[k][j][i + 1]; lhsm[j][i][4] = lhs[j][i][4]; } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- //--------------------------------------------------------------------- // perform the Thomas algorithm; first, FORWARD ELIMINATION //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(j, i, m, i1, i2, fac1) firstprivate(ny2, k, grid_points) for(j = 1; j <= ny2; j++) { /*************** Clava msgError 
**************
unsolved dependency for arrayAccess lhs use : RWR
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = 0; i <= grid_points[0] - 3; i++) {
  i1 = i + 1;
  i2 = i + 2;
  fac1 = 1.0 / lhs[j][i][2];
  lhs[j][i][3] = fac1 * lhs[j][i][3];
  lhs[j][i][4] = fac1 * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
  }
  lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1] * lhs[j][i][3];
  lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1] * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1] * rhs[k][j][i][m];
  }
  lhs[j][i2][1] = lhs[j][i2][1] - lhs[j][i2][0] * lhs[j][i][3];
  lhs[j][i2][2] = lhs[j][i2][2] - lhs[j][i2][0] * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhs[j][i2][0] * rhs[k][j][i][m];
  }
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, m, i, i1, fac1, fac2) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
  i = grid_points[0] - 2;
  i1 = grid_points[0] - 1;
  fac1 = 1.0 / lhs[j][i][2];
  lhs[j][i][3] = fac1 * lhs[j][i][3];
  lhs[j][i][4] = fac1 * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
  }
  lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1] * lhs[j][i][3];
  lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1] * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1] * rhs[k][j][i][m];
  }
  //---------------------------------------------------------------------
  // scale the last row immediately
  //---------------------------------------------------------------------
  fac2 = 1.0 / lhs[j][i1][2];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i1][m] = fac2 * rhs[k][j][i1][m];
  }
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, i1, i2, m, fac1) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
  /*************** Clava msgError **************
  unsolved dependency for arrayAccess lhsp use : RWR
  unsolved dependency for arrayAccess rhs use : RW
  unsolved dependency for arrayAccess lhsm use : RWR
  ****************************************/
  for(i = 0; i <= grid_points[0] - 3; i++) {
    i1 = i + 1;
    i2 = i + 2;
    m = 3;
    fac1 = 1.0 / lhsp[j][i][2];
    lhsp[j][i][3] = fac1 * lhsp[j][i][3];
    lhsp[j][i][4] = fac1 * lhsp[j][i][4];
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
    lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1] * lhsp[j][i][3];
    lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1] * lhsp[j][i][4];
    rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1] * rhs[k][j][i][m];
    lhsp[j][i2][1] = lhsp[j][i2][1] - lhsp[j][i2][0] * lhsp[j][i][3];
    lhsp[j][i2][2] = lhsp[j][i2][2] - lhsp[j][i2][0] * lhsp[j][i][4];
    rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsp[j][i2][0] * rhs[k][j][i][m];
    m = 4;
    fac1 = 1.0 / lhsm[j][i][2];
    lhsm[j][i][3] = fac1 * lhsm[j][i][3];
    lhsm[j][i][4] = fac1 * lhsm[j][i][4];
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
    lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1] * lhsm[j][i][3];
    lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1] * lhsm[j][i][4];
    rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1] * rhs[k][j][i][m];
    lhsm[j][i2][1] = lhsm[j][i2][1] - lhsm[j][i2][0] * lhsm[j][i][3];
    lhsm[j][i2][2] = lhsm[j][i2][2] - lhsm[j][i2][0] * lhsm[j][i][4];
    rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsm[j][i2][0] * rhs[k][j][i][m];
  }
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, i1, m, fac1) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
  i = grid_points[0] - 2;
  i1 = grid_points[0] - 1;
  m = 3;
  fac1 = 1.0 / lhsp[j][i][2];
  lhsp[j][i][3] = fac1 * lhsp[j][i][3];
  lhsp[j][i][4] = fac1 * lhsp[j][i][4];
  rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
  lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1] * lhsp[j][i][3];
  lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1] * lhsp[j][i][4];
  rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1] * rhs[k][j][i][m];
  m = 4;
  fac1 = 1.0 / lhsm[j][i][2];
  lhsm[j][i][3] = fac1 * lhsm[j][i][3];
  lhsm[j][i][4] = fac1 * lhsm[j][i][4];
  rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
  lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1] * lhsm[j][i][3];
  lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1] * lhsm[j][i][4];
  rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1] * rhs[k][j][i][m];
  //---------------------------------------------------------------------
  // Scale the last row immediately
  //---------------------------------------------------------------------
  rhs[k][j][i1][3] = rhs[k][j][i1][3] / lhsp[j][i1][2];
  rhs[k][j][i1][4] = rhs[k][j][i1][4] / lhsm[j][i1][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, m, i, i1) firstprivate(ny2, k, grid_points, lhs, lhsp, lhsm)
for(j = 1; j <= ny2; j++) {
  i = grid_points[0] - 2;
  i1 = grid_points[0] - 1;
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j][i1][m];
  }
  rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j][i1][3];
  rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j][i1][4];
}
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, m, i1, i2) firstprivate(ny2, k, grid_points, lhs, lhsp, lhsm)
for(j = 1; j <= ny2; j++) {
  /*************** Clava msgError **************
  unsolved dependency for arrayAccess rhs use : RW
  ****************************************/
  for(i = grid_points[0] - 3; i >= 0; i--) {
    i1 = i + 1;
    i2 = i + 2;
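    //-------------------------------------------------------------------
    // (sketch) this is the back-substitution of the scalar pentadiagonal
    // solve: once the forward sweep has reduced each row to unit
    // diagonal, the solution closes out as
    //   x[i] = d[i] - c3[i]*x[i+1] - c4[i]*x[i+2]
    // which the statements below apply to rhs, with c3 = lhs[j][i][3]
    // and c4 = lhs[j][i][4] (lhsp/lhsm play the same role for the u+c
    // and u-c factors).
    //-------------------------------------------------------------------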
/*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j][i1][m] - lhs[j][i][4] * rhs[k][j][i2][m]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j][i1][3] - lhsp[j][i][4] * rhs[k][j][i2][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j][i1][4] - lhsm[j][i][4] * rhs[k][j][i2][4]; } } } //--------------------------------------------------------------------- // Do the block-diagonal inversion //--------------------------------------------------------------------- ninvr(); } //--------------------------------------------------------------------- // this function performs the solution of the approximate factorization // step in the y-direction for all five matrix components // simultaneously. The Thomas algorithm is employed to solve the // systems for the y-lines. Boundary conditions are non-periodic //--------------------------------------------------------------------- void y_solve() { int i, j, k, j1, j2, m; double ru1, fac1, fac2; double rhoq[36]; double cv[36]; double lhs[37][37][5]; double lhsp[37][37][5]; double lhsm[37][37][5]; #pragma omp parallel for default(shared) private(k, i, j, m, ru1, j1, j2, fac1, fac2) firstprivate(nx2, ny2, c3c4, con43, c1c5, dy3, dy5, dymax, dy1, dtty2, dtty1, c2dtty1, comz5, comz4, comz1, comz6, grid_points, rho_i, vs, speed, lhs, lhsp, lhsm, cv, rhoq) for(k = 1; k <= grid_points[2] - 2; k++) { lhsinitj(ny2 + 1, nx2, lhs, lhsp, lhsm); //--------------------------------------------------------------------- // Computes the left hand side for the three y-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(i, j, ru1) firstprivate(c3c4, k, con43, c1c5, dy3, dy5, dymax, dy1, dtty2, dtty1, c2dtty1, grid_points, rho_i, vs, cv, rhoq) for(i = 1; i <= grid_points[0] - 2; i++) { // #pragma omp parallel for default(shared) private(j, ru1) firstprivate(c3c4, k, i, con43, c1c5, dy3, dy5, dymax, dy1, grid_points, rho_i, vs) for(j = 0; j <= grid_points[1] - 1; j++) { ru1 = c3c4 * rho_i[k][j][i]; cv[j] = vs[k][j][i]; rhoq[j] = ((((dy3 + con43 * ru1) > (dy5 + c1c5 * ru1) ? (dy3 + con43 * ru1) : (dy5 + c1c5 * ru1))) > (((dymax + ru1) > (dy1) ? (dymax + ru1) : (dy1))) ? (((dy3 + con43 * ru1) > (dy5 + c1c5 * ru1) ? (dy3 + con43 * ru1) : (dy5 + c1c5 * ru1))) : (((dymax + ru1) > (dy1) ? 
(dymax + ru1) : (dy1))));
}
// #pragma omp parallel for default(shared) private(j) firstprivate(i, dtty2, dtty1, c2dtty1, grid_points, cv, rhoq)
for(j = 1; j <= grid_points[1] - 2; j++) {
  lhs[j][i][0] = 0.0;
  lhs[j][i][1] = -dtty2 * cv[j - 1] - dtty1 * rhoq[j - 1];
  lhs[j][i][2] = 1.0 + c2dtty1 * rhoq[j];
  lhs[j][i][3] = dtty2 * cv[j + 1] - dtty1 * rhoq[j + 1];
  lhs[j][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(i, j) firstprivate(comz5, comz4, comz1, comz6, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
  j = 1;
  lhs[j][i][2] = lhs[j][i][2] + comz5;
  lhs[j][i][3] = lhs[j][i][3] - comz4;
  lhs[j][i][4] = lhs[j][i][4] + comz1;
  lhs[j + 1][i][1] = lhs[j + 1][i][1] - comz4;
  lhs[j + 1][i][2] = lhs[j + 1][i][2] + comz6;
  lhs[j + 1][i][3] = lhs[j + 1][i][3] - comz4;
  lhs[j + 1][i][4] = lhs[j + 1][i][4] + comz1;
}
// #pragma omp parallel for default(shared) private(j, i) firstprivate(comz1, comz4, comz6, grid_points)
for(j = 3; j <= grid_points[1] - 4; j++) {
  // #pragma omp parallel for default(shared) private(i) firstprivate(j, comz1, comz4, comz6, grid_points)
  for(i = 1; i <= grid_points[0] - 2; i++) {
    lhs[j][i][0] = lhs[j][i][0] + comz1;
    lhs[j][i][1] = lhs[j][i][1] - comz4;
    lhs[j][i][2] = lhs[j][i][2] + comz6;
    lhs[j][i][3] = lhs[j][i][3] - comz4;
    lhs[j][i][4] = lhs[j][i][4] + comz1;
  }
}
// #pragma omp parallel for default(shared) private(i, j) firstprivate(comz1, comz4, comz6, comz5, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
  j = grid_points[1] - 3;
  lhs[j][i][0] = lhs[j][i][0] + comz1;
  lhs[j][i][1] = lhs[j][i][1] - comz4;
  lhs[j][i][2] = lhs[j][i][2] + comz6;
  lhs[j][i][3] = lhs[j][i][3] - comz4;
  lhs[j + 1][i][0] = lhs[j + 1][i][0] + comz1;
  lhs[j + 1][i][1] = lhs[j + 1][i][1] - comz4;
  lhs[j + 1][i][2] = lhs[j + 1][i][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, do the other two factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i) firstprivate(dtty2, k, grid_points, lhs, speed)
for(j = 1; j <= grid_points[1] - 2; j++) {
  // #pragma omp parallel for default(shared) private(i) firstprivate(j, dtty2, k, grid_points, lhs, speed)
  for(i = 1; i <= grid_points[0] - 2; i++) {
    lhsp[j][i][0] = lhs[j][i][0];
    lhsp[j][i][1] = lhs[j][i][1] - dtty2 * speed[k][j - 1][i];
    lhsp[j][i][2] = lhs[j][i][2];
    lhsp[j][i][3] = lhs[j][i][3] + dtty2 * speed[k][j + 1][i];
    lhsp[j][i][4] = lhs[j][i][4];
    lhsm[j][i][0] = lhs[j][i][0];
    lhsm[j][i][1] = lhs[j][i][1] + dtty2 * speed[k][j - 1][i];
    lhsm[j][i][2] = lhs[j][i][2];
    lhsm[j][i][3] = lhs[j][i][3] - dtty2 * speed[k][j + 1][i];
    lhsm[j][i][4] = lhs[j][i][4];
  }
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RWR
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(j = 0; j <= grid_points[1] - 3; j++) {
  j1 = j + 1;
  j2 = j + 2;
  // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, grid_points)
  for(i = 1; i <= grid_points[0] - 2; i++) {
    fac1 = 1.0 / lhs[j][i][2];
    lhs[j][i][3] = fac1 * lhs[j][i][3];
    lhs[j][i][4] = fac1 * lhs[j][i][4];
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 3; m++) {
      rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
    }
    lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1] * lhs[j][i][3];
    lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1] * lhs[j][i][4];
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 3; m++) {
      rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1] * rhs[k][j][i][m];
    }
    lhs[j2][i][1] = lhs[j2][i][1] - lhs[j2][i][0] * lhs[j][i][3];
    lhs[j2][i][2] = lhs[j2][i][2] - lhs[j2][i][0] * lhs[j][i][4];
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 3; m++) {
      rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhs[j2][i][0] * rhs[k][j][i][m];
    }
  }
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
// #pragma omp parallel for default(shared) private(i, m, fac1, fac2) firstprivate(j, k, j1, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
  fac1 = 1.0 / lhs[j][i][2];
  lhs[j][i][3] = fac1 * lhs[j][i][3];
  lhs[j][i][4] = fac1 * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
  }
  lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1] * lhs[j][i][3];
  lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1] * lhs[j][i][4];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1] * rhs[k][j][i][m];
  }
  //---------------------------------------------------------------------
  // scale the last row immediately
  //---------------------------------------------------------------------
  fac2 = 1.0 / lhs[j1][i][2];
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  for(m = 0; m < 3; m++) {
    rhs[k][j1][i][m] = fac2 * rhs[k][j1][i][m];
  }
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhsp use : RWR
unsolved dependency for arrayAccess rhs use : RW
unsolved dependency for arrayAccess lhsm use : RWR
****************************************/
for(j = 0; j <= grid_points[1] - 3; j++) {
  j1 = j + 1;
  j2 = j + 2;
  // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, grid_points)
  for(i = 1; i <= grid_points[0] - 2; i++) {
    m = 3;
    fac1 = 1.0 / lhsp[j][i][2];
    lhsp[j][i][3] = fac1 * lhsp[j][i][3];
    lhsp[j][i][4] = fac1 * lhsp[j][i][4];
    rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
    lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1] * lhsp[j][i][3];
    lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1] * lhsp[j][i][4];
    rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1] * rhs[k][j][i][m];
    lhsp[j2][i][1] = lhsp[j2][i][1] - lhsp[j2][i][0] * lhsp[j][i][3];
    lhsp[j2][i][2] = lhsp[j2][i][2] - lhsp[j2][i][0] * lhsp[j][i][4];
    rhs[k][j2][i][m] = rhs[k][j2][i][m] -
lhsp[j2][i][0] * rhs[k][j][i][m]; m = 4; fac1 = 1.0 / lhsm[j][i][2]; lhsm[j][i][3] = fac1 * lhsm[j][i][3]; lhsm[j][i][4] = fac1 * lhsm[j][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1] * lhsm[j][i][3]; lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1] * lhsm[j][i][4]; rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1] * rhs[k][j][i][m]; lhsm[j2][i][1] = lhsm[j2][i][1] - lhsm[j2][i][0] * lhsm[j][i][3]; lhsm[j2][i][2] = lhsm[j2][i][2] - lhsm[j2][i][0] * lhsm[j][i][4]; rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsm[j2][i][0] * rhs[k][j][i][m]; } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- j = grid_points[1] - 2; j1 = grid_points[1] - 1; // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, j1, grid_points) for(i = 1; i <= grid_points[0] - 2; i++) { m = 3; fac1 = 1.0 / lhsp[j][i][2]; lhsp[j][i][3] = fac1 * lhsp[j][i][3]; lhsp[j][i][4] = fac1 * lhsp[j][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1] * lhsp[j][i][3]; lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1] * lhsp[j][i][4]; rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1] * rhs[k][j][i][m]; m = 4; fac1 = 1.0 / lhsm[j][i][2]; lhsm[j][i][3] = fac1 * lhsm[j][i][3]; lhsm[j][i][4] = fac1 * lhsm[j][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1] * lhsm[j][i][3]; lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1] * lhsm[j][i][4]; rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1] * rhs[k][j][i][m]; //--------------------------------------------------------------------- // Scale the last row immediately //--------------------------------------------------------------------- rhs[k][j1][i][3] = rhs[k][j1][i][3] / lhsp[j1][i][2]; rhs[k][j1][i][4] = rhs[k][j1][i][4] / lhsm[j1][i][2]; } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- j = grid_points[1] - 2; j1 = grid_points[1] - 1; // #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, j1, grid_points, lhs, lhsp, lhsm) for(i = 1; i <= grid_points[0] - 2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j1][i][m]; } rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j1][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j1][i][4]; } //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- /*************** Clava msgError ************** unsolved dependency for arrayAccess rhs use : RW ****************************************/ for(j = grid_points[1] - 3; j >= 0; j--) { j1 = j + 1; j2 = j + 2; // #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, grid_points, lhs, lhsp, lhsm) for(i = 1; i <= grid_points[0] - 2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j1][i][m] - lhs[j][i][4] * rhs[k][j2][i][m]; } //------------------------------------------------------------------- 
// And the remaining two //------------------------------------------------------------------- rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j1][i][3] - lhsp[j][i][4] * rhs[k][j2][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j1][i][4] - lhsm[j][i][4] * rhs[k][j2][i][4]; } } } pinvr(); } //--------------------------------------------------------------------- // this function performs the solution of the approximate factorization // step in the z-direction for all five matrix components // simultaneously. The Thomas algorithm is employed to solve the // systems for the z-lines. Boundary conditions are non-periodic //--------------------------------------------------------------------- void z_solve() { int i, j, k, k1, k2, m; double ru1, fac1, fac2; double rhos[36]; double cv[36]; double lhs[37][37][5]; double lhsp[37][37][5]; double lhsm[37][37][5]; #pragma omp parallel for default(shared) private(j, i, k, m, ru1, k1, k2, fac1, fac2) firstprivate(ny2, nx2, nz2, c3c4, con43, c1c5, dz4, dz5, dzmax, dz1, dttz2, dttz1, c2dttz1, comz5, comz4, comz1, comz6, rho_i, ws, speed, grid_points, lhs, lhsp, lhsm, cv, rhos) for(j = 1; j <= ny2; j++) { lhsinitj(nz2 + 1, nx2, lhs, lhsp, lhsm); //--------------------------------------------------------------------- // Computes the left hand side for the three z-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(i, k, ru1) firstprivate(nx2, nz2, c3c4, j, con43, c1c5, dz4, dz5, dzmax, dz1, dttz2, dttz1, c2dttz1, rho_i, ws, cv, rhos) for(i = 1; i <= nx2; i++) { // #pragma omp parallel for default(shared) private(k, ru1) firstprivate(nz2, c3c4, j, i, con43, c1c5, dz4, dz5, dzmax, dz1, rho_i, ws) for(k = 0; k <= nz2 + 1; k++) { ru1 = c3c4 * rho_i[k][j][i]; cv[k] = ws[k][j][i]; rhos[k] = ((((dz4 + con43 * ru1) > (dz5 + c1c5 * ru1) ? (dz4 + con43 * ru1) : (dz5 + c1c5 * ru1))) > (((dzmax + ru1) > (dz1) ? (dzmax + ru1) : (dz1))) ? (((dz4 + con43 * ru1) > (dz5 + c1c5 * ru1) ? (dz4 + con43 * ru1) : (dz5 + c1c5 * ru1))) : (((dzmax + ru1) > (dz1) ? 
(dzmax + ru1) : (dz1)))); } // #pragma omp parallel for default(shared) private(k) firstprivate(nz2, i, dttz2, dttz1, c2dttz1, cv, rhos) for(k = 1; k <= nz2; k++) { lhs[k][i][0] = 0.0; lhs[k][i][1] = -dttz2 * cv[k - 1] - dttz1 * rhos[k - 1]; lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k]; lhs[k][i][3] = dttz2 * cv[k + 1] - dttz1 * rhos[k + 1]; lhs[k][i][4] = 0.0; } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(i, k) firstprivate(nx2, comz5, comz4, comz1, comz6) for(i = 1; i <= nx2; i++) { k = 1; lhs[k][i][2] = lhs[k][i][2] + comz5; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; k = 2; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; } // #pragma omp parallel for default(shared) private(k, i) firstprivate(nz2, nx2, comz1, comz4, comz6) for(k = 3; k <= nz2 - 2; k++) { // #pragma omp parallel for default(shared) private(i) firstprivate(nx2, k, comz1, comz4, comz6) for(i = 1; i <= nx2; i++) { lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; } } // #pragma omp parallel for default(shared) private(i, k) firstprivate(nx2, nz2, comz1, comz4, comz6, comz5) for(i = 1; i <= nx2; i++) { k = nz2 - 1; lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; k = nz2; lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz5; } //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) //--------------------------------------------------------------------- // #pragma omp parallel for default(shared) private(k, i) firstprivate(nz2, nx2, dttz2, j, lhs, speed) for(k = 1; k <= nz2; k++) { // #pragma omp parallel for default(shared) private(i) firstprivate(nx2, k, dttz2, j, lhs, speed) for(i = 1; i <= nx2; i++) { lhsp[k][i][0] = lhs[k][i][0]; lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k - 1][j][i]; lhsp[k][i][2] = lhs[k][i][2]; lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k + 1][j][i]; lhsp[k][i][4] = lhs[k][i][4]; lhsm[k][i][0] = lhs[k][i][0]; lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k - 1][j][i]; lhsm[k][i][2] = lhs[k][i][2]; lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k + 1][j][i]; lhsm[k][i][4] = lhs[k][i][4]; } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- /*************** Clava msgError ************** unsolved dependency for arrayAccess lhs use : RWR unsolved dependency for arrayAccess rhs use : RW ****************************************/ for(k = 0; k <= grid_points[2] - 3; k++) { k1 = k + 1; k2 = k + 2; // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j) for(i = 1; i <= nx2; i++) { fac1 = 1.0 / lhs[k][i][2]; lhs[k][i][3] = fac1 * lhs[k][i][3]; lhs[k][i][4] = fac1 * lhs[k][i][4]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; } lhs[k1][i][2] = lhs[k1][i][2] - 
lhs[k1][i][1] * lhs[k][i][3]; lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1] * lhs[k][i][4]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1] * rhs[k][j][i][m]; } lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0] * lhs[k][i][3]; lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0] * lhs[k][i][4]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0] * rhs[k][j][i][m]; } } } //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they do not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- k = grid_points[2] - 2; k1 = grid_points[2] - 1; // #pragma omp parallel for default(shared) private(i, m, fac1, fac2) firstprivate(nx2, k, j, k1) for(i = 1; i <= nx2; i++) { fac1 = 1.0 / lhs[k][i][2]; lhs[k][i][3] = fac1 * lhs[k][i][3]; lhs[k][i][4] = fac1 * lhs[k][i][4]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; } lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1] * lhs[k][i][3]; lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1] * lhs[k][i][4]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1] * rhs[k][j][i][m]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0 / lhs[k1][i][2]; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k1][j][i][m] = fac2 * rhs[k1][j][i][m]; } } //--------------------------------------------------------------------- // do the u+c and the u-c factors //--------------------------------------------------------------------- /*************** Clava msgError ************** unsolved dependency for arrayAccess lhsp use : RWR unsolved dependency for arrayAccess rhs use : RW unsolved dependency for arrayAccess lhsm use : RWR ****************************************/ for(k = 0; k <= grid_points[2] - 3; k++) { k1 = k + 1; k2 = k + 2; // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j) for(i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0 / lhsp[k][i][2]; lhsp[k][i][3] = fac1 * lhsp[k][i][3]; lhsp[k][i][4] = fac1 * lhsp[k][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1] * lhsp[k][i][3]; lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1] * lhsp[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1] * rhs[k][j][i][m]; lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0] * lhsp[k][i][3]; lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0] * lhsp[k][i][4]; rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0] * rhs[k][j][i][m]; m = 4; fac1 = 1.0 / lhsm[k][i][2]; lhsm[k][i][3] = fac1 * lhsm[k][i][3]; lhsm[k][i][4] = fac1 * lhsm[k][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1] *
lhsm[k][i][3]; lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1] * lhsm[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1] * rhs[k][j][i][m]; lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0] * lhsm[k][i][3]; lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0] * lhsm[k][i][4]; rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0] * rhs[k][j][i][m]; } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- k = grid_points[2] - 2; k1 = grid_points[2] - 1; // #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j, k1) for(i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0 / lhsp[k][i][2]; lhsp[k][i][3] = fac1 * lhsp[k][i][3]; lhsp[k][i][4] = fac1 * lhsp[k][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1] * lhsp[k][i][3]; lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1] * lhsp[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1] * rhs[k][j][i][m]; m = 4; fac1 = 1.0 / lhsm[k][i][2]; lhsm[k][i][3] = fac1 * lhsm[k][i][3]; lhsm[k][i][4] = fac1 * lhsm[k][i][4]; rhs[k][j][i][m] = fac1 * rhs[k][j][i][m]; lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1] * lhsm[k][i][3]; lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1] * lhsm[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1] * rhs[k][j][i][m]; //--------------------------------------------------------------------- // Scale the last row immediately (some of this is overkill // if this is the last cell) //--------------------------------------------------------------------- rhs[k1][j][i][3] = rhs[k1][j][i][3] / lhsp[k1][i][2]; rhs[k1][j][i][4] = rhs[k1][j][i][4] / lhsm[k1][i][2]; } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- k = grid_points[2] - 2; k1 = grid_points[2] - 1; // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, k1, j, lhs, lhsp, lhsm) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3] * rhs[k1][j][i][m]; } rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3] * rhs[k1][j][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3] * rhs[k1][j][i][4]; } //--------------------------------------------------------------------- // Whether or not this is the last processor, we always have // to complete the back-substitution //--------------------------------------------------------------------- //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- /*************** Clava msgError ************** unsolved dependency for arrayAccess rhs use : RW ****************************************/ for(k = grid_points[2] - 3; k >= 0; k--) { k1 = k + 1; k2 = k + 2; // #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, lhs, lhsp, lhsm) for(i = 1; i <= nx2; i++) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3] * rhs[k1][j][i][m] - lhs[k][i][4] * rhs[k2][j][i][m]; } //------------------------------------------------------------------- // And the 
remaining two //------------------------------------------------------------------- rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3] * rhs[k1][j][i][3] - lhsp[k][i][4] * rhs[k2][j][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3] * rhs[k1][j][i][4] - lhsm[k][i][4] * rhs[k2][j][i][4]; } } } tzetar(); } void set_constants() { ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 0.5; ce[0][7] = 0.02; ce[0][8] = 0.01; ce[0][9] = 0.03; ce[0][10] = 0.5; ce[0][11] = 0.4; ce[0][12] = 0.3; ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 0.01; ce[1][8] = 0.03; ce[1][9] = 0.02; ce[1][10] = 0.4; ce[1][11] = 0.3; ce[1][12] = 0.5; ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 0.04; ce[2][8] = 0.03; ce[2][9] = 0.05; ce[2][10] = 0.3; ce[2][11] = 0.5; ce[2][12] = 0.4; ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 0.03; ce[3][8] = 0.05; ce[3][9] = 0.04; ce[3][10] = 0.2; ce[3][11] = 0.1; ce[3][12] = 0.3; ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 0.1; ce[4][5] = 0.4; ce[4][6] = 0.3; ce[4][7] = 0.05; ce[4][8] = 0.04; ce[4][9] = 0.03; ce[4][10] = 0.1; ce[4][11] = 0.3; ce[4][12] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; bt = sqrt(0.5); dnxm1 = 1.0 / (double) (grid_points[0] - 1); dnym1 = 1.0 / (double) (grid_points[1] - 1); dnzm1 = 1.0 / (double) (grid_points[2] - 1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0 - c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; dxmax = ((dx3) > (dx4) ? (dx3) : (dx4)); dymax = ((dy2) > (dy4) ? (dy2) : (dy4)); dzmax = ((dz2) > (dz3) ? (dz2) : (dz3)); dssp = 0.25 * ((dx1) > (((dy1) > (dz1) ? (dy1) : (dz1))) ? (dx1) : (((dy1) > (dz1) ? 
(dy1) : (dz1)))); c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt * tx1; dttx2 = dt * tx2; dtty1 = dt * ty1; dtty2 = dt * ty2; dttz1 = dt * tz1; dttz2 = dt * tz2; c2dttx1 = 2.0 * dttx1; c2dtty1 = 2.0 * dtty1; c2dttz1 = 2.0 * dttz1; dtdssp = dt * dssp; comz1 = dtdssp; comz4 = 4.0 * dtdssp; comz5 = 5.0 * dtdssp; comz6 = 6.0 * dtdssp; c3c4tx3 = c3c4 * tx3; c3c4ty3 = c3c4 * ty3; c3c4tz3 = c3c4 * tz3; dx1tx1 = dx1 * tx1; dx2tx1 = dx2 * tx1; dx3tx1 = dx3 * tx1; dx4tx1 = dx4 * tx1; dx5tx1 = dx5 * tx1; dy1ty1 = dy1 * ty1; dy2ty1 = dy2 * ty1; dy3ty1 = dy3 * ty1; dy4ty1 = dy4 * ty1; dy5ty1 = dy5 * ty1; dz1tz1 = dz1 * tz1; dz2tz1 = dz2 * tz1; dz3tz1 = dz3 * tz1; dz4tz1 = dz4 * tz1; dz5tz1 = dz5 * tz1; c2iv = 2.5; con43 = 4.0 / 3.0; con16 = 1.0 / 6.0; xxcon1 = c3c4tx3 * con43 * tx3; xxcon2 = c3c4tx3 * tx3; xxcon3 = c3c4tx3 * conz1 * tx3; xxcon4 = c3c4tx3 * con16 * tx3; xxcon5 = c3c4tx3 * c1c5 * tx3; yycon1 = c3c4ty3 * con43 * ty3; yycon2 = c3c4ty3 * ty3; yycon3 = c3c4ty3 * conz1 * ty3; yycon4 = c3c4ty3 * con16 * ty3; yycon5 = c3c4ty3 * c1c5 * ty3; zzcon1 = c3c4tz3 * con43 * tz3; zzcon2 = c3c4tz3 * tz3; zzcon3 = c3c4tz3 * conz1 * tz3; zzcon4 = c3c4tz3 * con16 * tz3; zzcon5 = c3c4tz3 * c1c5 * tz3; } void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) { char size[16]; int j; printf("\n\n %s Benchmark Completed.\n", name); printf(" Class = %12c\n", class); // If this is not a grid-based problem (EP, FT, CG), then // we only print n1, which contains some measure of the // problem size. In that case, n2 and n3 are both zero. // Otherwise, we print the grid size n1xn2xn3 if((n2 == 0) && (n3 == 0)) { if((name[0] == 'E') && (name[1] == 'P')) { sprintf(size, "%15.0lf", pow(2.0, n1)); j = 14; if(size[j] == '.') { size[j] = ' '; j--; } size[j + 1] = '\0'; printf(" Size = %15s\n", size); } else { printf(" Size = %12d\n", n1); } } else { printf(" Size = %4dx%4dx%4d\n", n1, n2, n3); } printf(" Iterations = %12d\n", niter); printf(" Time in seconds = %12.4lf\n", t); printf(" Mop/s total = %15.2lf\n", mops); printf(" Operation type = %24s\n", optype); if(verified) printf(" Verification = %12s\n", "SUCCESSFUL"); else printf(" Verification = %12s\n", "UNSUCCESSFUL"); } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *) 0); if(sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec; } /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time() { double t; wtime(&t); return (t); } /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear(int n) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start(int n) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop(int n) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ 
/*****************************************************************/ double timer_read(int n) { return (elapsed[n]); }
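The pentadiagonal sweeps in the x/y/z_solve routines above are a scalar variant of the Thomas algorithm: normalize the pivot row, eliminate the sub-diagonal entries of the next rows, then back-substitute. A minimal sketch of the same idea for a plain tridiagonal system follows; the function and array names (thomas_tridiagonal, a, b, c, d) are illustrative and not part of the benchmark.

static void thomas_tridiagonal(int n, const double *a, double *b,
                               const double *c, double *d)
{
  /* a: sub-diagonal (a[0] unused), b: diagonal (overwritten),
   * c: super-diagonal, d: right-hand side, overwritten with the solution */
  int i;
  for (i = 1; i < n; i++) {            /* forward elimination */
    double w = a[i] / b[i - 1];
    b[i] = b[i] - w * c[i - 1];
    d[i] = d[i] - w * d[i - 1];
  }
  d[n - 1] = d[n - 1] / b[n - 1];      /* back substitution */
  for (i = n - 2; i >= 0; i--) {
    d[i] = (d[i] - c[i] * d[i + 1]) / b[i];
  }
}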
shared.c
// An OpenMP clause keyword: num_threads // used as a regular variable in a clause's variable list // Extracted from NPB 3.2 benchmarks // // 6/10/2010 #ifdef _OPENMP #include <omp.h> #endif void c_print_results( ) { int num_threads, max_threads, omp =0, dynamic =0, none =0; // none is another OpenMP keyword used with default() max_threads = 1; num_threads = 1; /* figure out number of threads used */ #ifdef _OPENMP max_threads = omp_get_max_threads(); #pragma omp parallel num_threads(6) shared(num_threads,omp, none,dynamic) { #pragma omp master num_threads = omp_get_num_threads()+none+omp+dynamic; } #endif }
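The test above hinges on OpenMP clause keywords (num_threads, none, dynamic) being ordinary C identifiers everywhere outside a pragma line. A related point the kernel exercises, sketched below under the same #ifdef guard: omp_get_num_threads() reports the size of the current team, so it returns 1 when queried outside any parallel region. This demo function is illustrative and not part of the extracted benchmark.

#ifdef _OPENMP
static int team_size_inside_region(void)
{
  int n = 1;                        /* team size observed by the master */
  #pragma omp parallel num_threads(6)
  {
    #pragma omp master
    n = omp_get_num_threads();      /* up to 6 here; would be 1 outside */
  }
  return n;
}
#endif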
heat-3d.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* heat-3d.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "heat-3d.h" /* Array initialization. */ static void init_array (int n, DATA_TYPE POLYBENCH_3D(A, N, N, N, n, n, n), DATA_TYPE POLYBENCH_3D(B, N, N, N, n, n, n)) { int i, j, k; for (i = 0; i < n; i++) for (j = 0; j < n; j++) for (k = 0; k < n; k++) A[i][j][k] = B[i][j][k] = (DATA_TYPE) (i + j + (n - k)) * 10 / (n); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_3D(A, N, N, N, n, n, n)) { int i, j, k; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("A"); for (i = 0; i < n; i++) for (j = 0; j < n; j++) for (k = 0; k < n; k++) { if ((i * n * n + j * n + k) % 20 == 0) fprintf(POLYBENCH_DUMP_TARGET, "\n"); fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, A[i][j][k]); } POLYBENCH_DUMP_END("A"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_heat_3d(int tsteps, int n, DATA_TYPE POLYBENCH_3D(A, N, N, N, n, n, n), DATA_TYPE POLYBENCH_3D(B, N, N, N, n, n, n)) { int t, i, j, k; for (t = 1; t <= TSTEPS; t++) { #pragma omp parallel for default(shared) private(i, j, k) firstprivate(n, A) for (i = 1; i < _PB_N - 1; i++) { for (j = 1; j < _PB_N - 1; j++) { for (k = 1; k < _PB_N - 1; k++) { B[i][j][k] = SCALAR_VAL(0.125) * (A[i + 1][j][k] - SCALAR_VAL(2.0) * A[i][j][k] + A[i - 1][j][k]) + SCALAR_VAL(0.125) * (A[i][j + 1][k] - SCALAR_VAL(2.0) * A[i][j][k] + A[i][j - 1][k]) + SCALAR_VAL(0.125) * (A[i][j][k + 1] - SCALAR_VAL(2.0) * A[i][j][k] + A[i][j][k - 1]) + A[i][j][k]; } } } #pragma omp parallel for default(shared) private(i, j, k) firstprivate(n, B) for (i = 1; i < _PB_N - 1; i++) { for (j = 1; j < _PB_N - 1; j++) { for (k = 1; k < _PB_N - 1; k++) { A[i][j][k] = SCALAR_VAL(0.125) * (B[i + 1][j][k] - SCALAR_VAL(2.0) * B[i][j][k] + B[i - 1][j][k]) + SCALAR_VAL(0.125) * (B[i][j + 1][k] - SCALAR_VAL(2.0) * B[i][j][k] + B[i][j - 1][k]) + SCALAR_VAL(0.125) * (B[i][j][k + 1] - SCALAR_VAL(2.0) * B[i][j][k] + B[i][j][k - 1]) + B[i][j][k]; } } } } } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; /* Variable declaration/allocation. */ POLYBENCH_3D_ARRAY_DECL(A, DATA_TYPE, N, N, N, n, n, n); POLYBENCH_3D_ARRAY_DECL(B, DATA_TYPE, N, N, N, n, n, n); /* Initialize array(s). */ init_array (n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_heat_3d (tsteps, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }
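kernel_heat_3d above is race free under both parallel-for pragmas because of its double buffering: each sweep reads only the previous buffer (A, then B) and writes only the current one, so no two iterations of the parallelized i loop touch the same element. A 1-D sketch of the same pattern, with illustrative names not taken from PolyBench:

static void jacobi1d_step(int n, const double *src, double *dst)
{
  int i;
  #pragma omp parallel for
  for (i = 1; i < n - 1; i++) {
    /* reads touch only src; each iteration writes only its own dst[i] */
    dst[i] = 0.25 * (src[i - 1] + 2.0 * src[i] + src[i + 1]);
  }
  /* the caller swaps src/dst between steps, as heat-3d alternates A and B */
}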
2.norace4.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; for (int i = 1; i < N; i++) #pragma omp simd for (int j = 1; j < N; j++) A[i - 1][j] += A[i][j] + i + j; } // CHECK: Region is Data Race Free. // END
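The region above is race free because only the inner j loop runs concurrently (as SIMD lanes): for a fixed i, all writes go to row i-1 and all reads to row i, with distinct lanes using distinct j. Parallelizing the outer i loop instead would create a cross-iteration conflict, since iteration i reads row i while iteration i+1 writes it. The sketch below shows that racy variant; it is illustrative only, never called, and a race checker should flag it.

static void racy_variant(int A[20][20])
{
  /* WRONG on purpose: iteration i+1 writes A[i][..] while iteration i
   * still reads A[i][..], so running the i loop in parallel races. */
  #pragma omp parallel for
  for (int i = 1; i < 20; i++)
    for (int j = 1; j < 20; j++)
      A[i - 1][j] += A[i][j] + i + j;
}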
CCGSubSurf_opensubdiv.c
/* * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * ***** END GPL LICENSE BLOCK ***** */ /** \file blender/blenkernel/intern/CCGSubSurf_opensubdiv.c * \ingroup bke */ #ifdef WITH_OPENSUBDIV #include "MEM_guardedalloc.h" #include "BLI_sys_types.h" // for intptr_t support #include "BLI_utildefines.h" /* for BLI_assert */ #include "BLI_listbase.h" #include "BLI_math.h" #include "BLI_threads.h" #include "CCGSubSurf.h" #include "CCGSubSurf_intern.h" #include "BKE_DerivedMesh.h" #include "BKE_subsurf.h" #include "DNA_userdef_types.h" #include "opensubdiv_capi.h" #include "opensubdiv_converter_capi.h" #include "GL/glew.h" #include "GPU_extensions.h" #define OSD_LOG if (false) printf static bool compare_ccg_derivedmesh_topology(CCGSubSurf *ss, DerivedMesh *dm) { const int num_verts = dm->getNumVerts(dm); const int num_edges = dm->getNumEdges(dm); const int num_polys = dm->getNumPolys(dm); const MEdge *medge = dm->getEdgeArray(dm); const MLoop *mloop = dm->getLoopArray(dm); const MPoly *mpoly = dm->getPolyArray(dm); /* Quick preliminary tests based on the number of verts and facces. */ { if (num_verts != ss->vMap->numEntries || num_edges != ss->eMap->numEntries || num_polys != ss->fMap->numEntries) { return false; } } /* Rather slow check for faces topology change. */ { CCGFaceIterator ccg_face_iter; for (ccgSubSurf_initFaceIterator(ss, &ccg_face_iter); !ccgFaceIterator_isStopped(&ccg_face_iter); ccgFaceIterator_next(&ccg_face_iter)) { /*const*/ CCGFace *ccg_face = ccgFaceIterator_getCurrent(&ccg_face_iter); const int poly_index = GET_INT_FROM_POINTER(ccgSubSurf_getFaceFaceHandle(ccg_face)); const MPoly *mp = &mpoly[poly_index]; int corner; if (ccg_face->numVerts != mp->totloop) { return false; } for (corner = 0; corner < ccg_face->numVerts; corner++) { /*const*/ CCGVert *ccg_vert = FACE_getVerts(ccg_face)[corner]; const int vert_index = GET_INT_FROM_POINTER(ccgSubSurf_getVertVertHandle(ccg_vert)); if (vert_index != mloop[mp->loopstart + corner].v) { return false; } } } } /* Check for edge topology change. */ { CCGEdgeIterator ccg_edge_iter; for (ccgSubSurf_initEdgeIterator(ss, &ccg_edge_iter); !ccgEdgeIterator_isStopped(&ccg_edge_iter); ccgEdgeIterator_next(&ccg_edge_iter)) { /* const */ CCGEdge *ccg_edge = ccgEdgeIterator_getCurrent(&ccg_edge_iter); /* const */ CCGVert *ccg_vert1 = ccg_edge->v0; /* const */ CCGVert *ccg_vert2 = ccg_edge->v1; const int ccg_vert1_index = GET_INT_FROM_POINTER(ccgSubSurf_getVertVertHandle(ccg_vert1)); const int ccg_vert2_index = GET_INT_FROM_POINTER(ccgSubSurf_getVertVertHandle(ccg_vert2)); const int edge_index = GET_INT_FROM_POINTER(ccgSubSurf_getEdgeEdgeHandle(ccg_edge)); const MEdge *me = &medge[edge_index]; if (me->v1 != ccg_vert1_index || me->v2 != ccg_vert2_index) { return false; } } } /* TODO(sergey): Crease topology changes detection. 
*/ { CCGEdgeIterator ccg_edge_iter; for (ccgSubSurf_initEdgeIterator(ss, &ccg_edge_iter); !ccgEdgeIterator_isStopped(&ccg_edge_iter); ccgEdgeIterator_next(&ccg_edge_iter)) { /* const */ CCGEdge *ccg_edge = ccgEdgeIterator_getCurrent(&ccg_edge_iter); const int edge_index = GET_INT_FROM_POINTER(ccgSubSurf_getEdgeEdgeHandle(ccg_edge)); if (ccg_edge->crease != medge[edge_index].crease) { return false; } } } return true; } static bool compare_osd_derivedmesh_topology(CCGSubSurf *ss, DerivedMesh *dm) { const OpenSubdiv_TopologyRefinerDescr *topology_refiner; OpenSubdiv_Converter converter; bool result; if (ss->osd_mesh == NULL && ss->osd_topology_refiner == NULL) { return true; } /* TODO(sergey): De-duplicate with topology counter at the bottom of * the file. */ if (ss->osd_topology_refiner != NULL) { topology_refiner = ss->osd_topology_refiner; } else { topology_refiner = openSubdiv_getGLMeshTopologyRefiner(ss->osd_mesh); } ccgSubSurf_converter_setup_from_derivedmesh(ss, dm, &converter); result = openSubdiv_topologyRefnerCompareConverter(topology_refiner, &converter); ccgSubSurf_converter_free(&converter); return result; } static bool opensubdiv_is_topology_changed(CCGSubSurf *ss, DerivedMesh *dm) { if (ss->osd_compute != U.opensubdiv_compute_type) { return true; } if (ss->osd_topology_refiner != NULL) { int levels = openSubdiv_topologyRefinerGetSubdivLevel( ss->osd_topology_refiner); BLI_assert(ss->osd_mesh_invalid == true); if (levels != ss->subdivLevels) { return true; } } if (ss->osd_mesh != NULL && ss->osd_mesh_invalid == false) { const OpenSubdiv_TopologyRefinerDescr *topology_refiner = openSubdiv_getGLMeshTopologyRefiner(ss->osd_mesh); int levels = openSubdiv_topologyRefinerGetSubdivLevel(topology_refiner); BLI_assert(ss->osd_topology_refiner == NULL); if (levels != ss->subdivLevels) { return true; } } if (ss->skip_grids == false) { return compare_ccg_derivedmesh_topology(ss, dm) == false; } else { return compare_osd_derivedmesh_topology(ss, dm) == false; } return false; } void ccgSubSurf_checkTopologyChanged(CCGSubSurf *ss, DerivedMesh *dm) { if (opensubdiv_is_topology_changed(ss, dm)) { /* ** Make sure both GPU and CPU backends are properly reset. ** */ ss->osd_coarse_coords_invalid = true; /* Reset GPU part. */ ss->osd_mesh_invalid = true; if (ss->osd_topology_refiner != NULL) { openSubdiv_deleteTopologyRefinerDescr(ss->osd_topology_refiner); ss->osd_topology_refiner = NULL; } /* Reste CPU side. 
*/ if (ss->osd_evaluator != NULL) { openSubdiv_deleteEvaluatorDescr(ss->osd_evaluator); ss->osd_evaluator = NULL; } } } static void ccgSubSurf__updateGLMeshCoords(CCGSubSurf *ss) { BLI_assert(ss->meshIFC.numLayers == 3); openSubdiv_osdGLMeshUpdateVertexBuffer(ss->osd_mesh, (float *) ss->osd_coarse_coords, 0, ss->osd_num_coarse_coords); } bool ccgSubSurf_prepareGLMesh(CCGSubSurf *ss, bool use_osd_glsl, int active_uv_index) { int compute_type; switch (U.opensubdiv_compute_type) { #define CHECK_COMPUTE_TYPE(type) \ case USER_OPENSUBDIV_COMPUTE_ ## type: \ compute_type = OPENSUBDIV_EVALUATOR_ ## type; \ break; CHECK_COMPUTE_TYPE(CPU) CHECK_COMPUTE_TYPE(OPENMP) CHECK_COMPUTE_TYPE(OPENCL) CHECK_COMPUTE_TYPE(CUDA) CHECK_COMPUTE_TYPE(GLSL_TRANSFORM_FEEDBACK) CHECK_COMPUTE_TYPE(GLSL_COMPUTE) #undef CHECK_COMPUTE_TYPE } if (ss->osd_vao == 0) { glGenVertexArrays(1, &ss->osd_vao); } if (ss->osd_mesh_invalid) { if (ss->osd_mesh != NULL) { ccgSubSurf__delete_osdGLMesh(ss->osd_mesh); ss->osd_mesh = NULL; } ss->osd_mesh_invalid = false; } if (ss->osd_mesh == NULL) { if (ss->osd_topology_refiner == NULL) { /* Happens with empty meshes. */ /* TODO(sergey): Add assert that mesh is indeed empty. */ return false; } ss->osd_mesh = openSubdiv_createOsdGLMeshFromTopologyRefiner( ss->osd_topology_refiner, compute_type, ss->subdivLevels); ss->osd_topology_refiner = NULL; if (UNLIKELY(ss->osd_mesh == NULL)) { /* Most likely compute device is not available. */ return false; } ccgSubSurf__updateGLMeshCoords(ss); openSubdiv_osdGLMeshRefine(ss->osd_mesh); openSubdiv_osdGLMeshSynchronize(ss->osd_mesh); ss->osd_coarse_coords_invalid = false; glBindVertexArray(ss->osd_vao); glBindBuffer(GL_ARRAY_BUFFER, openSubdiv_getOsdGLMeshVertexBuffer(ss->osd_mesh)); glEnableVertexAttribArray(0); glEnableVertexAttribArray(1); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, 0); glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, (float *)12); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindVertexArray(0); } else if (ss->osd_coarse_coords_invalid) { ccgSubSurf__updateGLMeshCoords(ss); openSubdiv_osdGLMeshRefine(ss->osd_mesh); openSubdiv_osdGLMeshSynchronize(ss->osd_mesh); ss->osd_coarse_coords_invalid = false; } openSubdiv_osdGLMeshDisplayPrepare(use_osd_glsl, active_uv_index); return true; } void ccgSubSurf_drawGLMesh(CCGSubSurf *ss, bool fill_quads, int start_partition, int num_partitions) { if (LIKELY(ss->osd_mesh != NULL)) { glBindVertexArray(ss->osd_vao); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, openSubdiv_getOsdGLMeshPatchIndexBuffer(ss->osd_mesh)); openSubdiv_osdGLMeshBindVertexBuffer(ss->osd_mesh); glBindVertexArray(ss->osd_vao); openSubdiv_osdGLMeshDisplay(ss->osd_mesh, fill_quads, start_partition, num_partitions); glBindVertexArray(0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); } } int ccgSubSurf_getNumGLMeshBaseFaces(CCGSubSurf *ss) { const OpenSubdiv_TopologyRefinerDescr *topology_refiner; if (ss->osd_topology_refiner != NULL) { topology_refiner = ss->osd_topology_refiner; } else if (ss->osd_mesh != NULL) { topology_refiner = openSubdiv_getGLMeshTopologyRefiner(ss->osd_mesh); } else { return 0; } return openSubdiv_topologyRefinerGetNumFaces(topology_refiner); } /* Get number of vertices in base faces in a particular GL mesh. 
*/ int ccgSubSurf_getNumGLMeshBaseFaceVerts(CCGSubSurf *ss, int face) { const OpenSubdiv_TopologyRefinerDescr *topology_refiner; if (ss->osd_topology_refiner != NULL) { topology_refiner = ss->osd_topology_refiner; } else if (ss->osd_mesh != NULL) { topology_refiner = openSubdiv_getGLMeshTopologyRefiner(ss->osd_mesh); } else { return 0; } return openSubdiv_topologyRefinerGetNumFaceVerts(topology_refiner, face); } void ccgSubSurf_setSkipGrids(CCGSubSurf *ss, bool skip_grids) { ss->skip_grids = skip_grids; } bool ccgSubSurf_needGrids(CCGSubSurf *ss) { return ss->skip_grids == false; } BLI_INLINE void ccgSubSurf__mapGridToFace(int S, float grid_u, float grid_v, float *face_u, float *face_v) { float u, v; /* - Each grid covers half of the face along the edges. * - Grid's (0, 0) starts from the middle of the face. */ u = 0.5f - 0.5f * grid_u; v = 0.5f - 0.5f * grid_v; if (S == 0) { *face_u = v; *face_v = u; } else if (S == 1) { *face_u = 1.0f - u; *face_v = v; } else if (S == 2) { *face_u = 1.0f - v; *face_v = 1.0f - u; } else { *face_u = u; *face_v = 1.0f - v; } } BLI_INLINE void ccgSubSurf__mapEdgeToFace(int S, int edge_segment, bool inverse_edge, int edgeSize, float *face_u, float *face_v) { int t = inverse_edge ? edgeSize - edge_segment - 1 : edge_segment; if (S == 0) { *face_u = (float) t / (edgeSize - 1); *face_v = 0.0f; } else if (S == 1) { *face_u = 1.0f; *face_v = (float) t / (edgeSize - 1); } else if (S == 2) { *face_u = 1.0f - (float) t / (edgeSize - 1); *face_v = 1.0f; } else { *face_u = 0.0f; *face_v = 1.0f - (float) t / (edgeSize - 1); } } void ccgSubSurf_evaluatorSetFVarUV(CCGSubSurf *ss, DerivedMesh *dm, int layer_index) { MPoly *mpoly = dm->getPolyArray(dm); MLoopUV *mloopuv = CustomData_get_layer_n(&dm->loopData, CD_MLOOPUV, layer_index); int num_polys = dm->getNumPolys(dm); int index, poly; BLI_assert(ss->osd_evaluator != NULL); for (poly = 0, index = 0; poly < num_polys; poly++) { int loop; MPoly *mp = &mpoly[poly]; for (loop = 0; loop < mp->totloop; loop++, index++) { MLoopUV *mluv = &mloopuv[loop + mp->loopstart]; (void)mluv; /* TODO(sergey): Send mluv->uv to the evaluator's face varying * buffer. */ } } (void)ss; } void ccgSubSurf_evaluatorFVarUV(CCGSubSurf *ss, int face_index, int S, float grid_u, float grid_v, float uv[2]) { float face_u, face_v; ccgSubSurf__mapGridToFace(S, grid_u, grid_v, &face_u, &face_v); (void)ss; (void)face_index; /* TODO(sergey): Evaluate face varying coordinate. */ zero_v2(uv); } static bool opensubdiv_createEvaluator(CCGSubSurf *ss) { OpenSubdiv_Converter converter; OpenSubdiv_TopologyRefinerDescr *topology_refiner; if (ss->fMap->numEntries == 0) { /* OpenSubdiv doesn't support meshes without faces. 
*/ return false; } ccgSubSurf_converter_setup_from_ccg(ss, &converter); topology_refiner = openSubdiv_createTopologyRefinerDescr(&converter); ccgSubSurf_converter_free(&converter); ss->osd_evaluator = openSubdiv_createEvaluatorDescr(topology_refiner, ss->subdivLevels); if (ss->osd_evaluator == NULL) { BLI_assert(!"OpenSubdiv initialization failed, should not happen."); return false; } return true; } static bool opensubdiv_ensureEvaluator(CCGSubSurf *ss) { if (ss->osd_evaluator == NULL) { OSD_LOG("Allocating new evaluator, %d verts\n", ss->vMap->numEntries); opensubdiv_createEvaluator(ss); } return ss->osd_evaluator != NULL; } static void opensubdiv_updateEvaluatorCoarsePositions(CCGSubSurf *ss) { float (*positions)[3]; int vertDataSize = ss->meshIFC.vertDataSize; int num_basis_verts = ss->vMap->numEntries; int i; /* TODO(sergey): Avoid allocation on every update. We could either update * coordinates in chunks of 1K vertices (which will only use stack memory) * or do some callback magic for OSD evaluator can invoke it and fill in * buffer directly. */ if (ss->meshIFC.numLayers == 3) { /* If all the components are to be initialized, no need to memset the * new memory block. */ positions = MEM_mallocN(3 * sizeof(float) * num_basis_verts, "OpenSubdiv coarse points"); } else { /* Calloc in order to have z component initialized to 0 for Uvs */ positions = MEM_callocN(3 * sizeof(float) * num_basis_verts, "OpenSubdiv coarse points"); } #pragma omp parallel for for (i = 0; i < ss->vMap->curSize; i++) { CCGVert *v = (CCGVert *) ss->vMap->buckets[i]; for (; v; v = v->next) { float *co = VERT_getCo(v, 0); BLI_assert(v->osd_index < ss->vMap->numEntries); VertDataCopy(positions[v->osd_index], co, ss); OSD_LOG("Point %d has value %f %f %f\n", v->osd_index, positions[v->osd_index][0], positions[v->osd_index][1], positions[v->osd_index][2]); } } openSubdiv_setEvaluatorCoarsePositions(ss->osd_evaluator, (float *)positions, 0, num_basis_verts); MEM_freeN(positions); } static void opensubdiv_evaluateQuadFaceGrids(CCGSubSurf *ss, CCGFace *face, const int osd_face_index) { int normalDataOffset = ss->normalDataOffset; int subdivLevels = ss->subdivLevels; int gridSize = ccg_gridsize(subdivLevels); int edgeSize = ccg_edgesize(subdivLevels); int vertDataSize = ss->meshIFC.vertDataSize; int S; bool do_normals = ss->meshIFC.numLayers == 3; #pragma omp parallel for for (S = 0; S < face->numVerts; S++) { int x, y, k; CCGEdge *edge = NULL; bool inverse_edge; for (x = 0; x < gridSize; x++) { for (y = 0; y < gridSize; y++) { float *co = FACE_getIFCo(face, subdivLevels, S, x, y); float *no = FACE_getIFNo(face, subdivLevels, S, x, y); float grid_u = (float) x / (gridSize - 1), grid_v = (float) y / (gridSize - 1); float face_u, face_v; float P[3], dPdu[3], dPdv[3]; ccgSubSurf__mapGridToFace(S, grid_u, grid_v, &face_u, &face_v); /* TODO(sergey): Need proper port. */ openSubdiv_evaluateLimit(ss->osd_evaluator, osd_face_index, face_u, face_v, P, do_normals ? dPdu : NULL, do_normals ? 
dPdv : NULL); OSD_LOG("face=%d, corner=%d, grid_u=%f, grid_v=%f, face_u=%f, face_v=%f, P=(%f, %f, %f)\n", osd_face_index, S, grid_u, grid_v, face_u, face_v, P[0], P[1], P[2]); VertDataCopy(co, P, ss); if (do_normals) { cross_v3_v3v3(no, dPdu, dPdv); normalize_v3(no); } if (x == gridSize - 1 && y == gridSize - 1) { float *vert_co = VERT_getCo(FACE_getVerts(face)[S], subdivLevels); VertDataCopy(vert_co, co, ss); if (do_normals) { float *vert_no = VERT_getNo(FACE_getVerts(face)[S], subdivLevels); VertDataCopy(vert_no, no, ss); } } if (S == 0 && x == 0 && y == 0) { float *center_co = (float *)FACE_getCenterData(face); VertDataCopy(center_co, co, ss); if (do_normals) { float *center_no = (float *)((byte *)FACE_getCenterData(face) + normalDataOffset); VertDataCopy(center_no, no, ss); } } } } for (x = 0; x < gridSize; x++) { VertDataCopy(FACE_getIECo(face, subdivLevels, S, x), FACE_getIFCo(face, subdivLevels, S, x, 0), ss); if (do_normals) { VertDataCopy(FACE_getIENo(face, subdivLevels, S, x), FACE_getIFNo(face, subdivLevels, S, x, 0), ss); } } for (k = 0; k < face->numVerts; k++) { CCGEdge *current_edge = FACE_getEdges(face)[k]; CCGVert **face_verts = FACE_getVerts(face); if (current_edge->v0 == face_verts[S] && current_edge->v1 == face_verts[(S + 1) % face->numVerts]) { edge = current_edge; inverse_edge = false; break; } if (current_edge->v1 == face_verts[S] && current_edge->v0 == face_verts[(S + 1) % face->numVerts]) { edge = current_edge; inverse_edge = true; break; } } BLI_assert(edge != NULL); for (x = 0; x < edgeSize; x++) { float u = 0, v = 0; float *co = EDGE_getCo(edge, subdivLevels, x); float *no = EDGE_getNo(edge, subdivLevels, x); float P[3], dPdu[3], dPdv[3]; ccgSubSurf__mapEdgeToFace(S, x, inverse_edge, edgeSize, &u, &v); /* TODO(sergey): Ideally we will re-use grid here, but for now * let's just re-evaluate for simplicity. */ /* TODO(sergey): Need proper port. */ openSubdiv_evaluateLimit(ss->osd_evaluator, osd_face_index, u, v, P, dPdu, dPdv); VertDataCopy(co, P, ss); if (do_normals) { cross_v3_v3v3(no, dPdu, dPdv); normalize_v3(no); } } } } static void opensubdiv_evaluateNGonFaceGrids(CCGSubSurf *ss, CCGFace *face, const int osd_face_index) { CCGVert **all_verts = FACE_getVerts(face); int normalDataOffset = ss->normalDataOffset; int subdivLevels = ss->subdivLevels; int gridSize = ccg_gridsize(subdivLevels); int edgeSize = ccg_edgesize(subdivLevels); int vertDataSize = ss->meshIFC.vertDataSize; int S; bool do_normals = ss->meshIFC.numLayers == 3; /* Note about handling non-quad faces. * * In order to deal with non-quad faces we need to split them * into a quads in the following way: * * | * (vert_next) * | * | * | * (face_center) ------------------- (v2) * | (o)--------------------> | * | | v | * | | | * | | | * | | | * | | y ^ | * | | | | * | v u x | | * | <---(o) | * ---- (vert_prev) ---- (v1) -------------------- (vert) * * This is how grids are expected to be stored and it's how * OpenSubdiv deals with non-quad faces using ptex face indices. * We only need to convert ptex (x, y) to grid (u, v) by some * simple flips and evaluate the ptex face. */ /* Evaluate face grids. */ #pragma omp parallel for for (S = 0; S < face->numVerts; S++) { int x, y; for (x = 0; x < gridSize; x++) { for (y = 0; y < gridSize; y++) { float *co = FACE_getIFCo(face, subdivLevels, S, x, y); float *no = FACE_getIFNo(face, subdivLevels, S, x, y); float u = 1.0f - (float) y / (gridSize - 1), v = 1.0f - (float) x / (gridSize - 1); float P[3], dPdu[3], dPdv[3]; /* TODO(sergey): Need proper port. 
*/ openSubdiv_evaluateLimit(ss->osd_evaluator, osd_face_index + S, u, v, P, dPdu, dPdv); OSD_LOG("face=%d, corner=%d, u=%f, v=%f, P=(%f, %f, %f)\n", osd_face_index + S, S, u, v, P[0], P[1], P[2]); VertDataCopy(co, P, ss); if (do_normals) { cross_v3_v3v3(no, dPdu, dPdv); normalize_v3(no); } /* TODO(sergey): De-dpuplicate with the quad case. */ if (x == gridSize - 1 && y == gridSize - 1) { float *vert_co = VERT_getCo(FACE_getVerts(face)[S], subdivLevels); VertDataCopy(vert_co, co, ss); if (do_normals) { float *vert_no = VERT_getNo(FACE_getVerts(face)[S], subdivLevels); VertDataCopy(vert_no, no, ss); } } if (S == 0 && x == 0 && y == 0) { float *center_co = (float *)FACE_getCenterData(face); VertDataCopy(center_co, co, ss); if (do_normals) { float *center_no = (float *)((byte *)FACE_getCenterData(face) + normalDataOffset); VertDataCopy(center_no, no, ss); } } } } for (x = 0; x < gridSize; x++) { VertDataCopy(FACE_getIECo(face, subdivLevels, S, x), FACE_getIFCo(face, subdivLevels, S, x, 0), ss); if (do_normals) { VertDataCopy(FACE_getIENo(face, subdivLevels, S, x), FACE_getIFNo(face, subdivLevels, S, x, 0), ss); } } } /* Evaluate edges. */ for (S = 0; S < face->numVerts; S++) { CCGEdge *edge = FACE_getEdges(face)[S]; int x, S0, S1; bool flip; for (x = 0; x < face->numVerts; ++x) { if (all_verts[x] == edge->v0) { S0 = x; } else if (all_verts[x] == edge->v1) { S1 = x; } } if (S == face->numVerts - 1) { flip = S0 > S1; } else { flip = S0 < S1; } for (x = 0; x <= edgeSize / 2; x++) { float *edge_co = EDGE_getCo(edge, subdivLevels, x); float *edge_no = EDGE_getNo(edge, subdivLevels, x); float *face_edge_co; float *face_edge_no; if (flip) { face_edge_co = FACE_getIFCo(face, subdivLevels, S0, gridSize - 1, gridSize - 1 - x); face_edge_no = FACE_getIFNo(face, subdivLevels, S0, gridSize - 1, gridSize - 1 - x); } else { face_edge_co = FACE_getIFCo(face, subdivLevels, S0, gridSize - 1 - x, gridSize - 1); face_edge_no = FACE_getIFNo(face, subdivLevels, S0, gridSize - 1 - x, gridSize - 1); } VertDataCopy(edge_co, face_edge_co, ss); if (do_normals) { VertDataCopy(edge_no, face_edge_no, ss); } } for (x = edgeSize / 2 + 1; x < edgeSize; x++) { float *edge_co = EDGE_getCo(edge, subdivLevels, x); float *edge_no = EDGE_getNo(edge, subdivLevels, x); float *face_edge_co; float *face_edge_no; if (flip) { face_edge_co = FACE_getIFCo(face, subdivLevels, S1, x - edgeSize / 2, gridSize - 1); face_edge_no = FACE_getIFNo(face, subdivLevels, S1, x - edgeSize / 2, gridSize - 1); } else { face_edge_co = FACE_getIFCo(face, subdivLevels, S1, gridSize - 1, x - edgeSize / 2); face_edge_no = FACE_getIFNo(face, subdivLevels, S1, gridSize - 1, x - edgeSize / 2); } VertDataCopy(edge_co, face_edge_co, ss); if (do_normals) { VertDataCopy(edge_no, face_edge_no, ss); } } } } static void opensubdiv_evaluateGrids(CCGSubSurf *ss) { int i; for (i = 0; i < ss->fMap->curSize; i++) { CCGFace *face = (CCGFace *) ss->fMap->buckets[i]; for (; face; face = face->next) { if (face->numVerts == 4) { /* For quads we do special magic with converting face coords * into corner coords and interpolating grids from it. */ opensubdiv_evaluateQuadFaceGrids(ss, face, face->osd_index); } else { /* NGons and tris are split into separate osd faces which * evaluates onto grids directly. 
*/ opensubdiv_evaluateNGonFaceGrids(ss, face, face->osd_index); } } } } CCGError ccgSubSurf_initOpenSubdivSync(CCGSubSurf *ss) { if (ss->syncState != eSyncState_None) { return eCCGError_InvalidSyncState; } ss->syncState = eSyncState_OpenSubdiv; return eCCGError_None; } void ccgSubSurf_prepareTopologyRefiner(CCGSubSurf *ss, DerivedMesh *dm) { if (ss->osd_mesh == NULL || ss->osd_mesh_invalid) { if (dm->getNumPolys(dm) != 0) { OpenSubdiv_Converter converter; ccgSubSurf_converter_setup_from_derivedmesh(ss, dm, &converter); /* TODO(sergey): Remove possibly previously allocated refiner. */ ss->osd_topology_refiner = openSubdiv_createTopologyRefinerDescr(&converter); ccgSubSurf_converter_free(&converter); } } /* Update number of grids, needed for things like final faces * counter, used by display drawing. */ { const int num_polys = dm->getNumPolys(dm); const MPoly *mpoly = dm->getPolyArray(dm); int poly; ss->numGrids = 0; for (poly = 0; poly < num_polys; ++poly) { ss->numGrids += mpoly[poly].totloop; } } { const int num_verts = dm->getNumVerts(dm); const MVert *mvert = dm->getVertArray(dm); int vert; if (ss->osd_coarse_coords != NULL && num_verts != ss->osd_num_coarse_coords) { MEM_freeN(ss->osd_coarse_coords); ss->osd_coarse_coords = NULL; } if (ss->osd_coarse_coords == NULL) { ss->osd_coarse_coords = MEM_mallocN(sizeof(float) * 6 * num_verts, "osd coarse positions"); } for (vert = 0; vert < num_verts; vert++) { copy_v3_v3(ss->osd_coarse_coords[vert * 2 + 0], mvert[vert].co); normal_short_to_float_v3(ss->osd_coarse_coords[vert * 2 + 1], mvert[vert].no); } ss->osd_num_coarse_coords = num_verts; ss->osd_coarse_coords_invalid = true; } } void ccgSubSurf__sync_opensubdiv(CCGSubSurf *ss) { BLI_assert(ss->meshIFC.numLayers == 2 || ss->meshIFC.numLayers == 3); /* Common synchronization steps */ ss->osd_compute = U.opensubdiv_compute_type; if (ss->skip_grids == false) { /* Make sure OSD evaluator is up-to-date. */ if (opensubdiv_ensureEvaluator(ss)) { /* Update coarse points in the OpenSubdiv evaluator. */ opensubdiv_updateEvaluatorCoarsePositions(ss); /* Evaluate opensubdiv mesh into the CCG grids. */ opensubdiv_evaluateGrids(ss); } } else { BLI_assert(ss->meshIFC.numLayers == 3); } #ifdef DUMP_RESULT_GRIDS ccgSubSurf__dumpCoords(ss); #endif } void ccgSubSurf_free_osd_mesh(CCGSubSurf *ss) { if (ss->osd_mesh != NULL) { ccgSubSurf__delete_osdGLMesh(ss->osd_mesh); ss->osd_mesh = NULL; } if (ss->osd_vao != 0) { glDeleteVertexArrays(1, &ss->osd_vao); ss->osd_vao = 0; } } void ccgSubSurf_getMinMax(CCGSubSurf *ss, float r_min[3], float r_max[3]) { int i; BLI_assert(ss->skip_grids == true); if (ss->osd_num_coarse_coords == 0) { zero_v3(r_min); zero_v3(r_max); } for (i = 0; i < ss->osd_num_coarse_coords; i++) { /* Coarse coordinates has normals interleaved into the array. 
*/ DO_MINMAX(ss->osd_coarse_coords[2 * i], r_min, r_max); } } /* ** Delayed delete routines ** */ typedef struct OsdDeletePendingItem { struct OsdDeletePendingItem *next, *prev; OpenSubdiv_GLMesh *osd_mesh; unsigned int vao; } OsdDeletePendingItem; static SpinLock delete_spin; static ListBase delete_pool = {NULL, NULL}; static void delete_pending_push(OpenSubdiv_GLMesh *osd_mesh, unsigned int vao) { OsdDeletePendingItem *new_entry = MEM_mallocN(sizeof(OsdDeletePendingItem), "opensubdiv delete entry"); new_entry->osd_mesh = osd_mesh; new_entry->vao = vao; BLI_spin_lock(&delete_spin); BLI_addtail(&delete_pool, new_entry); BLI_spin_unlock(&delete_spin); } void ccgSubSurf__delete_osdGLMesh(OpenSubdiv_GLMesh *osd_mesh) { if (BLI_thread_is_main()) { openSubdiv_deleteOsdGLMesh(osd_mesh); } else { delete_pending_push(osd_mesh, 0); } } void ccgSubSurf__delete_vertex_array(unsigned int vao) { if (BLI_thread_is_main()) { glDeleteVertexArrays(1, &vao); } else { delete_pending_push(NULL, vao); } } void ccgSubSurf__delete_pending(void) { OsdDeletePendingItem *entry; BLI_assert(BLI_thread_is_main()); BLI_spin_lock(&delete_spin); for (entry = delete_pool.first; entry != NULL; entry = entry->next) { if (entry->osd_mesh != NULL) { openSubdiv_deleteOsdGLMesh(entry->osd_mesh); } if (entry->vao != 0) { glDeleteVertexArrays(1, &entry->vao); } } BLI_freelistN(&delete_pool); BLI_spin_unlock(&delete_spin); } void ccgSubSurf__sync_subdivUvs(CCGSubSurf *ss, bool subdiv_uvs) { ss->osd_subdiv_uvs = subdiv_uvs; } /* ** Public API ** */ void BKE_subsurf_osd_init(void) { openSubdiv_init(GPU_legacy_support()); BLI_spin_init(&delete_spin); } void BKE_subsurf_free_unused_buffers(void) { ccgSubSurf__delete_pending(); } void BKE_subsurf_osd_cleanup(void) { openSubdiv_cleanup(); ccgSubSurf__delete_pending(); BLI_spin_end(&delete_spin); } #endif /* WITH_OPENSUBDIV */
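delete_pending_push and ccgSubSurf__delete_pending above implement a common pattern for resources with thread affinity: OpenGL objects may only be destroyed on the thread owning the context, so other threads enqueue handles under a lock and the main thread drains the queue later. A stripped-down sketch of the same idea using POSIX primitives; all names here (PendingNode, resource_*) are illustrative, not Blender API.

#include <pthread.h>
#include <stdlib.h>

typedef struct PendingNode { struct PendingNode *next; void *handle; } PendingNode;
static PendingNode *pending_head = NULL;
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Any thread: defer destruction instead of destroying in place. */
static void resource_delete_deferred(void *handle)
{
  PendingNode *node = malloc(sizeof(*node));
  node->handle = handle;
  pthread_mutex_lock(&pending_lock);
  node->next = pending_head;
  pending_head = node;
  pthread_mutex_unlock(&pending_lock);
}

/* Owning (main) thread only: destroy everything queued so far. */
static void resource_flush_pending(void (*destroy)(void *))
{
  PendingNode *list, *next;
  pthread_mutex_lock(&pending_lock);
  list = pending_head;
  pending_head = NULL;
  pthread_mutex_unlock(&pending_lock);
  for (; list != NULL; list = next) {
    next = list->next;
    destroy(list->handle);
    free(list);
  }
}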
correlation_teams.c
/** * correlation.c This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <sys/time.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define ERROR_THRESHOLD 1.05 /* Problem size */ #define M 1024 #define N 1024 #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01f #define EPS 0.005f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* data) { int i, j; for (i=0; i < (M+1); i++) { for (j=0; j< (N+1); j++) { data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1); } } } void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat) { int i, j, j1, j2; // Determine mean of column vectors of input data matrix for (j = 1; j < (M+1); j++) { mean[j] = 0.0; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } //mean[j] /= (float)FLOAT_N; mean[j] /= (DATA_TYPE)FLOAT_N; } // Determine standard deviations of column vectors of data matrix. for (j = 1; j < (M+1); j++) { stddev[j] = 0.0; for (i = 1; i < (N+1); i++) { stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt_of_array_cell(stddev, j); stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j]; } //i - threadIdx.x, j = threadIdx.y // Center and reduce the column vectors. for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ; } } // Calculate the m * m correlation matrix. 
for (j1 = 1; j1 < M; j1++) { symmat[j1*(M+1) + j1] = 1.0; for (j2 = j1+1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for (i = 1; i < (N+1); i++) { symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]); } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } symmat[M*(M+1) + M] = 1.0; } void correlation_OMP(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat) { int i, j, k; #pragma omp target enter data map(to: data[:(M+1)*(N+1)], mean[:(M+1)], stddev[:(M+1)]) map(to: symmat[:(M+1)*(N+1)]) #pragma omp target #pragma omp teams distribute parallel for for (j = 1; j < (M+1); j++) { mean[j] = 0.0; int i; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } #pragma omp target #pragma omp teams distribute parallel for for (j = 1; j < (M+1); j++) { stddev[j] = 0.0; int i; for (i = 1; i < (N+1); i++) { stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt(stddev[j]); if(stddev[j] <=EPS) { stddev[j] = 1.0; } } #pragma omp target #pragma omp teams distribute parallel for collapse(2) for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ; } } #pragma omp target #pragma omp teams distribute parallel for for (k = 1; k < M; k++) { symmat[k*(M+1) + k] = 1.0; int j; for (j = k+1; j < (M+1); j++) { symmat[k*(M+1) + j] = 0.0; int i; for (i = 1; i < (N+1); i++) { symmat[k*(M+1) + j] += (data[i*(M+1) + k] * data[i*(M+1) + j]); } symmat[j*(M+1) + k] = symmat[k*(M+1) + j]; } } #pragma omp target update from(symmat[:(M+1)*(N+1)]) symmat[M*(M+1) + M] = 1.0; } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (M+1); i++) { for (j=1; j < (N+1); j++) { if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > ERROR_THRESHOLD) { fail++; //printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_GPU[i*N + j]); } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } int main() { double t_start, t_end; DATA_TYPE* data; DATA_TYPE* mean; DATA_TYPE* stddev; DATA_TYPE* symmat; DATA_TYPE* symmat_GPU; data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); symmat_GPU = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); fprintf(stdout, "<< Correlation Computation >>\n"); init_arrays(data); t_start = rtclock(); correlation_OMP(data, mean, stddev, symmat_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); correlation(data, mean, stddev, symmat); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(symmat, symmat_GPU); free(data); free(mean); free(stddev); free(symmat); free(symmat_GPU); return 0; }
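Both correlation() and correlation_OMP() above compute the Pearson coefficient corr(j1, j2) = sum_i (x[i][j1] - mean[j1]) * (x[i][j2] - mean[j2]) / (FLOAT_N * stddev[j1] * stddev[j2]); the division by FLOAT_N and by the stddevs is folded into the "center and reduce" pass (each entry is divided by sqrt(FLOAT_N) * stddev[j]), so the final pass reduces to a plain inner product. A direct two-column reference sketch for comparison (illustrative names; relies on the <math.h> include already present in this file):

static DATA_TYPE pearson_ref(const DATA_TYPE *x, const DATA_TYPE *y, int n)
{
  DATA_TYPE mx = 0, my = 0, sxy = 0, sxx = 0, syy = 0;
  int i;
  for (i = 0; i < n; i++) { mx += x[i]; my += y[i]; }
  mx /= n; my /= n;
  for (i = 0; i < n; i++) {
    sxy += (x[i] - mx) * (y[i] - my);   /* covariance numerator */
    sxx += (x[i] - mx) * (x[i] - mx);
    syy += (y[i] - my) * (y[i] - my);
  }
  return sxy / (DATA_TYPE)sqrt((double)(sxx * syy));
}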
mixed_tentusscher_myo_epi_2004_S2_7.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_7.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5108357840011,0.00130598320935367,0.778297936322614,0.778168249814233,0.000176184942496385,0.484492639570884,0.00295234515201219,0.999998328994369,1.95207361603797e-08,1.90559907730747e-05,0.999779205501714,1.00683281960406,0.999990079738075,5.13012060586906e-05,1.20488538387603,8.59982990627790,140.667607494303}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.5180163805163,0.000327758478537637,0.000171474998724435,0.000691830399658185,0.287392821646320,0.166300577201109,0.184041710714562,3.74850836746364,0.0218547235879219,3.15306872250787,1099.99995030167,0.000576388720425406,0.124097096559342,0.0199999709711304,0.00444385627683816,2.41454672727958e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
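Every gate update in the RHS_cpu_* functions above is the same closed-form step: the Hodgkin-Huxley gate ODE dx/dt = (x_inf - x)/tau_x is solved exactly over one step dt with x_inf and tau_x frozen (the Rush-Larsen scheme), which is what lets the model take larger time steps on the gates than forward Euler would allow. A minimal standalone sketch of that step follows; the function name and the `real` typedef here are illustrative stand-ins, not part of the model file:

#include <math.h>

typedef double real; /* stand-in for the solver's own `real` type */

/* One Rush-Larsen step: exact solution of dx/dt = (x_inf - x)/tau_x
 * over dt with x_inf and tau_x held constant. This is the pattern
 * behind every rDY_[1]..rDY_[10] assignment above. */
static real rush_larsen_step(real x, real x_inf, real tau_x, real dt)
{
    return x_inf - (x_inf - x) * exp(-dt / tau_x);
}

For dt much smaller than tau_x this reduces to forward Euler, x + dt*(x_inf - x)/tau_x; for dt much larger than tau_x it saturates at x_inf instead of overshooting, which is why the gate updates stay stable at a fixed dt.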
bt_hash_type_192.c
/* * This software is Copyright (c) 2015 Sayantan Datta <std2048 at gmail dot com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #ifdef HAVE_OPENCL #include <stdlib.h> #include <stdio.h> #include "bt_hash_types.h" uint192_t *loaded_hashes_192 = NULL; unsigned int *hash_table_192 = NULL; /* Assuming N < 0x7fffffff */ inline unsigned int modulo192_31b(uint192_t a, unsigned int N, uint64_t shift64, uint64_t shift128) { uint64_t p; p = (a.HI % N) * shift128; p += (a.MI % N) * shift64; p += a.LO % N; p %= N; return (unsigned int)p; } inline uint192_t add192(uint192_t a, unsigned int b) { uint192_t result; result.LO = a.LO + b; result.MI = a.MI + (result.LO < a.LO); result.HI = a.HI + (result.MI < a.MI); if (result.HI < a.HI) bt_warn("192 bit add overflow."); return result; } void allocate_ht_192(unsigned int num_loaded_hashes, unsigned int verbosity) { unsigned int i; if (bt_memalign_alloc((void **)&hash_table_192, 32, 6 * hash_table_size * sizeof(unsigned int))) bt_error("Couldn't allocate hash_table_192."); for (i = 0; i < hash_table_size; i++) hash_table_192[i] = hash_table_192[i + hash_table_size] = hash_table_192[i + 2 * hash_table_size] = hash_table_192[i + 3 * hash_table_size] = hash_table_192[i + 4 * hash_table_size] = hash_table_192[i + 5 * hash_table_size] = 0; total_memory_in_bytes += 6 * hash_table_size * sizeof(unsigned int); if (verbosity > 2) { fprintf(stdout, "Hash Table Size %Lf %% of Number of Loaded Hashes.\n", ((long double)hash_table_size / (long double)num_loaded_hashes) * 100.00); fprintf(stdout, "Hash Table Size(in GBs):%Lf\n", ((long double)6 * hash_table_size * sizeof(unsigned int)) / ((long double)1024 * 1024 * 1024)); } } inline unsigned int calc_ht_idx_192(unsigned int hash_location, unsigned int offset) { return modulo192_31b(add192(loaded_hashes_192[hash_location], offset), hash_table_size, shift64_ht_sz, shift128_ht_sz); } inline unsigned int zero_check_ht_192(unsigned int hash_table_idx) { return (hash_table_192[hash_table_idx] || hash_table_192[hash_table_idx + hash_table_size] || hash_table_192[hash_table_idx + 2 * hash_table_size] || hash_table_192[hash_table_idx + 3 * hash_table_size] || hash_table_192[hash_table_idx + 4 * hash_table_size] || hash_table_192[hash_table_idx + 5 * hash_table_size]); } inline void assign_ht_192(unsigned int hash_table_idx, unsigned int hash_location) { uint192_t hash = loaded_hashes_192[hash_location]; hash_table_192[hash_table_idx] = (unsigned int)(hash.LO & 0xffffffff); hash_table_192[hash_table_idx + hash_table_size] = (unsigned int)(hash.LO >> 32); hash_table_192[hash_table_idx + 2 * hash_table_size] = (unsigned int)(hash.MI & 0xffffffff); hash_table_192[hash_table_idx + 3 * hash_table_size] = (unsigned int)(hash.MI >> 32); hash_table_192[hash_table_idx + 4 * hash_table_size] = (unsigned int)(hash.HI & 0xffffffff); hash_table_192[hash_table_idx + 5 * hash_table_size] = (unsigned int)(hash.HI >> 32); } inline void assign0_ht_192(unsigned int hash_table_idx) { hash_table_192[hash_table_idx] = hash_table_192[hash_table_idx + hash_table_size] = hash_table_192[hash_table_idx + 2 * hash_table_size] = hash_table_192[hash_table_idx + 3 * hash_table_size] = hash_table_192[hash_table_idx + 4 * hash_table_size] = hash_table_192[hash_table_idx + 5 * hash_table_size] = 0; } unsigned int get_offset_192(unsigned int hash_table_idx, unsigned int hash_location) { unsigned int z = 
modulo192_31b(loaded_hashes_192[hash_location], hash_table_size, shift64_ht_sz, shift128_ht_sz); return (hash_table_size - z + hash_table_idx); } int test_tables_192(unsigned int num_loaded_hashes, OFFSET_TABLE_WORD *offset_table, unsigned int offset_table_size, unsigned int shift64_ot_sz, unsigned int shift128_ot_sz, unsigned int verbosity) { unsigned char *hash_table_collisions; unsigned int i, hash_table_idx, error = 1, count = 0; uint192_t hash; if (verbosity > 1) fprintf(stdout, "\nTesting Tables..."); if (bt_calloc((void **)&hash_table_collisions, hash_table_size, sizeof(unsigned char))) bt_error("Failed to allocate memory: hash_table_collisions."); #if _OPENMP #pragma omp parallel private(i, hash_table_idx, hash) #endif { #if _OPENMP #pragma omp for #endif for (i = 0; i < num_loaded_hashes; i++) { hash = loaded_hashes_192[i]; hash_table_idx = calc_ht_idx_192(i, (unsigned int)offset_table[ modulo192_31b(hash, offset_table_size, shift64_ot_sz, shift128_ot_sz)]); #if _OPENMP #pragma omp atomic #endif hash_table_collisions[hash_table_idx]++; if (error && (hash_table_192[hash_table_idx] != (unsigned int)(hash.LO & 0xffffffff) || hash_table_192[hash_table_idx + hash_table_size] != (unsigned int)(hash.LO >> 32) || hash_table_192[hash_table_idx + 2 * hash_table_size] != (unsigned int)(hash.MI & 0xffffffff) || hash_table_192[hash_table_idx + 3 * hash_table_size] != (unsigned int)(hash.MI >> 32) || hash_table_192[hash_table_idx + 4 * hash_table_size] != (unsigned int)(hash.HI & 0xffffffff) || hash_table_192[hash_table_idx + 5 * hash_table_size] != (unsigned int)(hash.HI >> 32) || hash_table_collisions[hash_table_idx] > 1)) { fprintf(stderr, "Error building tables: Loaded hash Idx:%u, No. of Collisions:%u\n", i, hash_table_collisions[hash_table_idx]); error = 0; } } #if _OPENMP #pragma omp single #endif for (hash_table_idx = 0; hash_table_idx < hash_table_size; hash_table_idx++) if (zero_check_ht_192(hash_table_idx)) count++; #if _OPENMP #pragma omp barrier #endif } if (count != num_loaded_hashes) { error = 0; fprintf(stderr, "Error!! 
Tables contain extra or missing entries.\n"); bt_free((void **)&hash_table_collisions); return 0; } bt_free((void **)&hash_table_collisions); if (error && verbosity > 1) fprintf(stdout, "OK\n"); return 1; } #define check_equal(p, q) \ (loaded_hashes_192[p].LO == loaded_hashes_192[q].LO && \ loaded_hashes_192[p].MI == loaded_hashes_192[q].MI && \ loaded_hashes_192[p].HI == loaded_hashes_192[q].HI) #define check_non_zero(p) \ (loaded_hashes_192[p].LO || loaded_hashes_192[p].MI || loaded_hashes_192[p].HI) #define check_zero(p) \ (loaded_hashes_192[p].LO == 0 && loaded_hashes_192[p].MI == 0 && loaded_hashes_192[p].HI == 0) #define set_zero(p) \ loaded_hashes_192[p].LO = loaded_hashes_192[p].MI = loaded_hashes_192[p].HI = 0 static void remove_duplicates_final(unsigned int num_loaded_hashes, unsigned int hash_table_size, unsigned int *rehash_list) { unsigned int i, **hash_location_list, counter; #define COLLISION_DTYPE unsigned int COLLISION_DTYPE *collisions; typedef struct { unsigned int store_loc1; unsigned int store_loc2; unsigned int idx_hash_loc_list; COLLISION_DTYPE collisions; COLLISION_DTYPE iter; } hash_table_data; hash_table_data *hash_table = NULL; if (bt_malloc((void **)&hash_table, hash_table_size * sizeof(hash_table_data))) bt_error("Failed to allocate memory: hash_table."); if (bt_calloc((void **)&collisions, hash_table_size, sizeof(COLLISION_DTYPE))) bt_error("Failed to allocate memory: collisions."); for (i = 0; i < num_loaded_hashes; i++) { unsigned int idx = loaded_hashes_192[rehash_list[i]].LO % hash_table_size; collisions[idx]++; } counter = 0; for (i = 0; i < hash_table_size; i++) { hash_table[i].collisions = collisions[i]; hash_table[i].iter = 0; hash_table[i].store_loc1 = hash_table[i].store_loc2 = hash_table[i].idx_hash_loc_list = 0xffffffff; if (hash_table[i].collisions > 3) hash_table[i].idx_hash_loc_list = counter++; } if (bt_malloc((void **)&hash_location_list, (counter + 1) * sizeof(unsigned int *))) bt_error("Failed to allocate memory: hash_location_list."); counter = 0; for (i = 0; i < hash_table_size; i++) if (collisions[i] > 3) { if (bt_malloc((void **)&hash_location_list[counter], (collisions[i] - 1) * sizeof(unsigned int))) bt_error("Failed to allocate memory: hash_location_list[counter]."); counter++; } for (i = 0; i < num_loaded_hashes; i++) { unsigned int k = rehash_list[i]; unsigned int idx = loaded_hashes_192[k].LO % hash_table_size; if (collisions[idx] == 2) { if (!hash_table[idx].iter) { hash_table[idx].iter++; hash_table[idx].store_loc1 = k; } else if (check_equal(hash_table[idx].store_loc1, k)) set_zero(k); } if (collisions[idx] == 3) { if (!hash_table[idx].iter) { hash_table[idx].iter++; hash_table[idx].store_loc1 = k; } else if (hash_table[idx].iter == 1) { if (check_equal(hash_table[idx].store_loc1, k)) set_zero(k); else { hash_table[idx].iter++; hash_table[idx].store_loc2 = k; } } else if (check_equal(hash_table[idx].store_loc1, k) || check_equal(hash_table[idx].store_loc2, k)) set_zero(k); } else if (collisions[idx] > 3) { unsigned int iter = hash_table[idx].iter; if (!iter) hash_location_list[hash_table[idx].idx_hash_loc_list][iter++] = k; else { unsigned int j; for (j = 0; j < iter; j++) if (check_equal(hash_location_list[hash_table[idx].idx_hash_loc_list][j], k)) { set_zero(k); break; } if (j == iter && iter < (unsigned int)hash_table[idx].collisions - 1) hash_location_list[hash_table[idx].idx_hash_loc_list][iter++] = k; } hash_table[idx].iter = iter; } } #undef COLLISION_DTYPE for (i = 0; i < counter; i++) bt_free((void **)&hash_location_list[i]); bt_free((void **)&hash_location_list); bt_free((void
**)&hash_table); bt_free((void **)&collisions); } unsigned int remove_duplicates_192(unsigned int num_loaded_hashes, unsigned int hash_table_size, unsigned int verbosity) { unsigned int i, num_unique_hashes, *rehash_list, counter; #define COLLISION_DTYPE unsigned int COLLISION_DTYPE *collisions; typedef struct { unsigned int store_loc1; unsigned int store_loc2; unsigned int store_loc3; COLLISION_DTYPE iter; } hash_table_data; hash_table_data *hash_table = NULL; if (verbosity > 1) fprintf(stdout, "Removing duplicate hashes..."); if (hash_table_size & (hash_table_size - 1)) { fprintf(stderr, "Duplicate removal hash table size must be a power of 2.\n"); return 0; } if (bt_malloc((void **)&hash_table, hash_table_size * sizeof(hash_table_data))) bt_error("Failed to allocate memory: hash_table."); if (bt_calloc((void **)&collisions, hash_table_size, sizeof(COLLISION_DTYPE))) bt_error("Failed to allocate memory: collisions."); #if _OPENMP #pragma omp parallel private(i) #endif { #if _OPENMP #pragma omp for #endif for (i = 0; i < num_loaded_hashes; i++) { unsigned int idx = loaded_hashes_192[i].LO & (hash_table_size - 1); #if _OPENMP #pragma omp atomic #endif collisions[idx]++; } counter = 0; #if _OPENMP #pragma omp barrier #pragma omp for #endif for (i = 0; i < hash_table_size; i++) { hash_table[i].iter = 0; if (collisions[i] > 4) #if _OPENMP #pragma omp atomic #endif counter += (collisions[i] - 3); } #if _OPENMP #pragma omp barrier #pragma omp sections #endif { #if _OPENMP #pragma omp section #endif { for (i = 0; i < num_loaded_hashes; i++) { unsigned int idx = loaded_hashes_192[i].LO & (hash_table_size - 1); if (collisions[idx] == 2) { if (!hash_table[idx].iter) { hash_table[idx].iter++; hash_table[idx].store_loc1 = i; } else if (check_equal(hash_table[idx].store_loc1, i)) set_zero(i); } } } #if _OPENMP #pragma omp section #endif { if (bt_malloc((void **)&rehash_list, counter * sizeof(unsigned int))) bt_error("Failed to allocate memory: rehash_list."); counter = 0; for (i = 0; i < num_loaded_hashes; i++) { unsigned int idx = loaded_hashes_192[i].LO & (hash_table_size - 1); if (collisions[idx] == 3) { if (!hash_table[idx].iter) { hash_table[idx].iter++; hash_table[idx].store_loc1 = i; } else if (hash_table[idx].iter == 1) { if (check_equal(hash_table[idx].store_loc1, i)) set_zero(i); else { hash_table[idx].iter++; hash_table[idx].store_loc2 = i; } } else if (check_equal(hash_table[idx].store_loc1, i) || check_equal(hash_table[idx].store_loc2, i)) set_zero(i); } else if (collisions[idx] >= 4) { if (!hash_table[idx].iter) { hash_table[idx].iter++; hash_table[idx].store_loc1 = i; } else if (hash_table[idx].iter == 1) { if (check_equal(hash_table[idx].store_loc1, i)) set_zero(i); else { hash_table[idx].iter++; hash_table[idx].store_loc2 = i; } } else if (hash_table[idx].iter == 2) { if (check_equal(hash_table[idx].store_loc1, i) || check_equal(hash_table[idx].store_loc2, i)) set_zero(i); else { hash_table[idx].iter++; hash_table[idx].store_loc3 = i; } } else if (hash_table[idx].iter >= 3) { if (check_equal(hash_table[idx].store_loc1, i) || check_equal(hash_table[idx].store_loc2, i) || check_equal(hash_table[idx].store_loc3, i)) set_zero(i); else { if (collisions[idx] > 4) rehash_list[counter++] = i; } } } } if (counter) remove_duplicates_final(counter, counter + (counter >> 1), rehash_list); bt_free((void **)&rehash_list); } } } #if 0 { unsigned int col1 = 0, col2 = 0, col3 = 0, col4 = 0, col5a = 0; for (i = 0; i < hash_table_size; i++) { if (collisions[i] == 1) col1++; else if (collisions[i] == 2)
col2++; else if (collisions[i] == 3) col3++; else if (collisions[i] == 4) col4++; else if (collisions[i] > 4) col5a += collisions[i]; } col2 *= 2; col3 *= 3; col4 *= 4; fprintf(stderr, "Statistics:%Lf %Lf %Lf %Lf %Lf\n", (long double)col1 / (long double)num_loaded_hashes, (long double)col2 / (long double)num_loaded_hashes, (long double)col3 / (long double)num_loaded_hashes, (long double)col4 / (long double)num_loaded_hashes, (long double)col5a / (long double)num_loaded_hashes); } #endif num_unique_hashes = 0; for (i = num_loaded_hashes - 1; (int)i >= 0; i--) if (check_non_zero(i)) { num_unique_hashes = i; break; } for (i = 0; i <= num_unique_hashes; i++) if (check_zero(i)) { unsigned int j; loaded_hashes_192[i] = loaded_hashes_192[num_unique_hashes]; set_zero(num_unique_hashes); num_unique_hashes--; for (j = num_unique_hashes; (int)j >= 0; j--) if (check_non_zero(j)) { num_unique_hashes = j; break; } } #undef COLLISION_DTYPE bt_free((void **)&collisions); bt_free((void **)&hash_table); if (verbosity > 1) fprintf(stdout, "Done\n"); return (num_unique_hashes + 1); } #endif
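The modulo192_31b() reduction above works limb by limb: writing a = HI*2^128 + MI*2^64 + LO gives a mod N = ((HI mod N)*(2^128 mod N) + (MI mod N)*(2^64 mod N) + LO mod N) mod N, where shift64_ht_sz and shift128_ht_sz hold the precomputed residues of 2^64 and 2^128. The N < 0x7fffffff precondition keeps each partial product below 2^62, so the 64-bit accumulator p cannot overflow. A small self-contained check of that identity; pow2_mod and the sample values are illustrative, not part of this file:

#include <stdint.h>
#include <stdio.h>

/* 2^k mod N by repeated doubling; safe because N < 2^31 keeps r*2 < 2^32. */
static uint64_t pow2_mod(unsigned int k, uint64_t N)
{
    uint64_t r = 1 % N;
    while (k--)
        r = (r * 2) % N;
    return r;
}

int main(void)
{
    uint64_t N = 1000003;                  /* any modulus below 0x7fffffff */
    uint64_t shift64  = pow2_mod(64, N);   /* plays the role of shift64_ht_sz */
    uint64_t shift128 = pow2_mod(128, N);  /* plays the role of shift128_ht_sz */
    uint64_t HI = 0x0123456789abcdefULL, MI = 0xfedcba9876543210ULL, LO = 42;
    /* each term is < 2^31 * 2^31 = 2^62, so the sum fits in 64 bits */
    uint64_t p = (HI % N) * shift128 + (MI % N) * shift64 + LO % N;
    printf("a mod N = %llu\n", (unsigned long long)(p % N));
    return 0;
}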
CG.h
/** * This file contains (modified) code from the Eigen library. * Eigen License: * * Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> * Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com> * * This Source Code Form is subject to the terms of the Mozilla * Public License v. 2.0. If a copy of the MPL was not distributed * with this file, You can obtain one at http://mozilla.org/MPL/2.0/. * * * ====================== * * The modifications are part of the Eigen Recursive Matrix Extension (ERME). * ERME License: * * Copyright (c) 2019 Darius Rückert * Licensed under the MIT License. */ #pragma once #include "../Core.h" #include "../Core/ParallelHelper.h" #include "Cholesky.h" namespace Eigen::Recursive { template <typename _Scalar> class RecursiveDiagonalPreconditioner { typedef _Scalar Scalar; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector; public: typedef typename Vector::StorageIndex StorageIndex; enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic }; RecursiveDiagonalPreconditioner() : m_isInitialized(false) {} void resize(int N) { m_invdiag.resize(N); } template <typename MatType> explicit RecursiveDiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) { compute(mat); } Eigen::Index rows() const { return m_invdiag.size(); } Eigen::Index cols() const { return m_invdiag.size(); } template <typename MatType> RecursiveDiagonalPreconditioner& analyzePattern(const MatType&) { return *this; } // Sparse Matrix Initialization template <typename Scalar, int options> // RecursiveDiagonalPreconditioner& factorize(const MatType& mat) RecursiveDiagonalPreconditioner& factorize(const SparseMatrix<Scalar, options>& mat) { using MatType = SparseMatrix<Scalar, options>; m_invdiag.resize(mat.cols()); for (int j = 0; j < mat.outerSize(); ++j) { typename MatType::InnerIterator it(mat, j); while (it && it.index() != j) ++it; if (it && it.index() == j) // m_invdiag(j) = Scalar(1)/it.value(); removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(inverseCholesky(it.value())); else // m_invdiag(j) = Scalar(1); removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(MultiplicativeNeutral<Scalar>::get()); } m_isInitialized = true; return *this; } // Dense Matrix Initialization template <typename MatType> RecursiveDiagonalPreconditioner& factorize(const MatType& mat) { m_invdiag.resize(mat.cols()); for (int j = 0; j < mat.outerSize(); ++j) { removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(inverseCholesky(mat(j, j))); } m_isInitialized = true; return *this; } template <typename T> RecursiveDiagonalPreconditioner& factorize(const Eigen::DiagonalMatrix<T, -1>& mat) { auto N = mat.rows(); if (m_invdiag.rows() != N) { std::terminate(); m_invdiag.resize(N); } #pragma omp for for (int j = 0; j < N; ++j) { m_invdiag(j) = inverseCholesky(mat.diagonal()(j)); } m_isInitialized = true; return *this; } template <typename MatType> RecursiveDiagonalPreconditioner& compute(const MatType& mat) { return factorize(mat); } /** \internal */ template <typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { // x = m_invdiag.array() * b.array(); #pragma omp for for (int i = 0; i < b.rows(); ++i) { x(i) = m_invdiag(i) * b(i); } } template <typename Rhs> inline const Eigen::Solve<RecursiveDiagonalPreconditioner, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); eigen_assert(m_invdiag.size() == b.rows() && "DiagonalPreconditioner::solve(): invalid number of rows of 
the right hand side matrix b"); return Eigen::Solve<RecursiveDiagonalPreconditioner, Rhs>(*this, b.derived()); } Eigen::ComputationInfo info() { return Eigen::Success; } const auto& getDiagElement(int i) const { return m_invdiag(i); } Vector m_invdiag; protected: bool m_isInitialized; }; //#define RM_CG_DEBUG_OUTPUT /** * A conjugate gradient solver, which works for recursives matrices. * Solve: * A * x = b for x * * The matrix A is given as function (for example a lambda function). * This way we can implement an implicit cg solver, which does not construct the full matrix A. * * Example call: * * // Build preconditioner * RecursiveDiagonalPreconditioner<MatrixScalar<Block>> P; * Eigen::Index iters = 50; * Scalar tol = 1e-50; * P.compute(S); * * // Solve with explicit matrix S * DAType tmp(n); * recursive_conjugate_gradient( * [&](const DAType& v) { * tmp = S * v; * return tmp; * }, * ej, da, P, iters, tol); * */ template <typename MultFunction, typename Rhs, typename Dest, typename Preconditioner, typename SuperScalar> EIGEN_DONT_INLINE void recursive_conjugate_gradient(const MultFunction& applyA, const Rhs& rhs, Dest& x, const Preconditioner& precond, Eigen::Index& iters, SuperScalar& tol_error) { // Typedefs using namespace Eigen; using std::abs; using std::sqrt; typedef SuperScalar RealScalar; typedef SuperScalar Scalar; typedef Rhs VectorType; // Temp Vector variables Index n = rhs.rows(); #ifdef RM_CG_DEBUG_OUTPUT std::cout << "Starting recursive CG" << std::endl; std::cout << "Iterations: " << iters << std::endl; std::cout << "Tolerance: " << tol_error << std::endl; std::cout << "N: " << n << std::endl; #endif #if 0 // Create them locally VectorType z(n); VectorType p(n); #else // Use static variables so a repeated call with the same size doesn't allocate memory static thread_local VectorType z; static thread_local VectorType p; static thread_local VectorType residual; z.resize(n); p.resize(n); residual.resize(n); #endif RealScalar tol = tol_error; Index maxIters = iters; applyA(x, residual); residual = rhs - residual; RealScalar rhsNorm2 = squaredNorm(rhs); if (rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } RealScalar threshold = tol * tol * rhsNorm2; RealScalar residualNorm2 = squaredNorm(residual); #ifdef RM_CG_DEBUG_OUTPUT std::cout << "Initial residual: " << residualNorm2 << std::endl; #endif if (residualNorm2 < threshold) { iters = 0; tol_error = sqrt(residualNorm2 / rhsNorm2); return; } p = precond.solve(residual); // initial search direction // the square of the absolute value of r scaled by invM RealScalar absNew = dot(residual, p); #ifdef RM_CG_DEBUG_OUTPUT std::cout << "dot(r,p): " << absNew << std::endl; #endif Index i = 0; while (i < maxIters) { // std::cout << "CG Residual " << i << ": " << residualNorm2 << std::endl; applyA(p, z); // the amount we travel on dir Scalar alpha = absNew / dot(p, z); // update solution x += scalarMult(p, alpha); // update residual residual -= scalarMult(z, alpha); residualNorm2 = squaredNorm(residual); #ifdef RM_CG_DEBUG_OUTPUT std::cout << "Iteration: " << i << " Residual: " << residualNorm2 << " Alpha: " << alpha << std::endl; #endif if (residualNorm2 < threshold) break; z = precond.solve(residual); // approximately solve for "A z = residual" // std::cout << expand(p).transpose() << std::endl; RealScalar absOld = absNew; absNew = dot(residual, z); // update the absolute value of r RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction // std::cout << "absnew " << 
absNew << " beta " << beta << std::endl; p = z + scalarMult(p, beta); // update search direction i++; } tol_error = sqrt(residualNorm2 / rhsNorm2); iters = i; } #if defined(_OPENMP) template <typename T> struct alignas(64) CacheAlignedValues { T data; }; template <typename T> inline double accumulate(const T& v) { double d = 0; for (auto& v : v) { d += v.data; } return d; } // Multi threaded implementation template <typename MultFunction, typename Rhs, typename Dest, typename Preconditioner, typename SuperScalar> EIGEN_DONT_INLINE void recursive_conjugate_gradient_OMP(const MultFunction& applyA, const Rhs& rhs, Dest& x, const Preconditioner& precond, Eigen::Index& iters, SuperScalar& tol_error) { // Typedefs using namespace Eigen; using std::abs; using std::sqrt; typedef SuperScalar RealScalar; typedef SuperScalar Scalar; typedef Rhs VectorType; // Temp Vector variables Index n = rhs.rows(); // Use static variables so a repeated call with the same size doesn't allocate memory static VectorType z; static VectorType p; static VectorType residual; static std::vector<CacheAlignedValues<Scalar>> tmpResults1, tmpResults; # pragma omp single { z.resize(n); p.resize(n); residual.resize(n); tmpResults1.resize(omp_get_num_threads()); tmpResults.resize(omp_get_num_threads()); } int tid = omp_get_thread_num(); RealScalar tol = tol_error; Index maxIters = iters; applyA(x, residual); # pragma omp for for (int i = 0; i < n; ++i) { residual(i) = rhs(i) - residual(i); } // tmpResults[tid] = squaredNorm_omp(rhs); squaredNorm_omp_local(rhs, tmpResults[tid].data); RealScalar rhsNorm2 = accumulate(tmpResults); if (rhsNorm2 == 0) { // x.setZero(); # pragma omp for for (int i = 0; i < n; ++i) { x(i).get().setZero(); } iters = 0; tol_error = 0; maxIters = 0; } RealScalar threshold = tol * tol * rhsNorm2; squaredNorm_omp_local(residual, tmpResults1[tid].data); RealScalar residualNorm2 = accumulate(tmpResults1); // RealScalar residualNorm2 = squaredNorm(residual); if (residualNorm2 < threshold) { iters = 0; tol_error = sqrt(residualNorm2 / rhsNorm2); maxIters = 0; } p = precond.solve(residual); // initial search direction dot_omp_local(residual, p, tmpResults[tid].data); RealScalar absNew = accumulate(tmpResults); Index i = 0; while (i < maxIters) { // std::cout << "CG Residual " << i << ": " << residualNorm2 << std::endl; applyA(p, z); dot_omp_local(p, z, tmpResults1[tid].data); Scalar dotpz = accumulate(tmpResults1); Scalar alpha = absNew / dotpz; # pragma omp for for (int i = 0; i < n; ++i) { // the amount we travel on dir // update solution x(i) += p(i) * alpha; // update residual residual(i) -= z(i) * alpha; } squaredNorm_omp_local(residual, tmpResults[tid].data); residualNorm2 = accumulate(tmpResults); if (residualNorm2 < threshold) break; z = precond.solve(residual); // approximately solve for "A z = residual" RealScalar absOld = absNew; dot_omp_local(residual, z, tmpResults[tid].data); absNew = accumulate(tmpResults); RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction // std::cout << "absnew " << absNew << " beta " << beta << std::endl; # pragma omp for for (int i = 0; i < n; ++i) { p(i) = z(i) + p(i) * beta; // update search direction } i++; } tol_error = sqrt(residualNorm2 / rhsNorm2); iters = i; } #endif } // namespace Eigen::Recursive
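Stripped of the recursive-matrix machinery and the OpenMP reductions, both solvers above are the textbook preconditioned conjugate gradient iteration. The plain scalar sketch below (dense matvec, Jacobi preconditioner, illustrative names throughout, toy size N) shows the same alpha/beta recurrences that the applyA and precond.solve calls implement on block matrices:

#include <math.h>

#define N 3 /* toy problem size for the sketch */

static void matvec(const double A[N][N], const double x[N], double y[N])
{
    for (int i = 0; i < N; i++) {
        y[i] = 0.0;
        for (int j = 0; j < N; j++)
            y[i] += A[i][j] * x[j];
    }
}

static double dot(const double a[N], const double b[N])
{
    double s = 0.0;
    for (int i = 0; i < N; i++)
        s += a[i] * b[i];
    return s;
}

/* Jacobi-preconditioned CG for an SPD system A x = b. The roles of z, p,
 * alpha and beta match recursive_conjugate_gradient(); the diagonal
 * divide stands in for RecursiveDiagonalPreconditioner::_solve_impl().
 * Returns the iteration count. */
static int pcg(const double A[N][N], const double b[N], double x[N],
               int max_iters, double tol)
{
    double r[N], z[N], p[N], Ap[N];
    matvec(A, x, r);
    for (int i = 0; i < N; i++) r[i] = b[i] - r[i];       /* r = b - A*x */
    for (int i = 0; i < N; i++) p[i] = z[i] = r[i] / A[i][i];
    double rz = dot(r, z);                                /* absNew above */
    double threshold = tol * tol * dot(b, b);             /* same stopping rule */
    int it = 0;
    while (it < max_iters && dot(r, r) > threshold) {
        matvec(A, p, Ap);
        double alpha = rz / dot(p, Ap);                   /* step length */
        for (int i = 0; i < N; i++) x[i] += alpha * p[i];
        for (int i = 0; i < N; i++) r[i] -= alpha * Ap[i];
        for (int i = 0; i < N; i++) z[i] = r[i] / A[i][i];
        double rz_new = dot(r, z);
        double beta = rz_new / rz;     /* Gram-Schmidt coefficient, as above */
        rz = rz_new;
        for (int i = 0; i < N; i++) p[i] = z[i] + beta * p[i];
        it++;
    }
    return it;
}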
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef 
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. 
ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. 
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// \brief id<NSCopying> type.
  QualType QIDNSCopying;

  /// \brief Will hold the 'respondsToSelector:' selector.
  Selector RespondsToSelectorSel;

  /// \brief Counter for internal MS Asm label names.
  unsigned MSAsmLabelNameCounter;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields.
  bool AllowAbstractFieldReference;

  /// \brief Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression
    /// evaluation context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

    /// \brief If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// \brief If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      bool ParentNeedsCleanups,
                                      Decl *ManglingContextDecl,
                                      bool IsDecltype)
      : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
        IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
        NumTypos(0), ManglingContextDecl(ManglingContextDecl),
        MangleNumbering() { }

    /// \brief Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == Unevaluated || Context == UnevaluatedAbstract;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// \brief Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *getCurrentMangleNumberContext(
      const DeclContext *DC, Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// \brief A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

  /// \brief A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// \brief The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// \brief The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedButUsed - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }

  private:
    Sema &S;
    bool OldFPContractState : 1;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();
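  // Illustrative sketch (editorial addition): a Sema instance is normally
  // created by the driver and handed to the parser, with clang::ParseAST
  // driving it.  Roughly, given a configured Preprocessor `PP`,
  // ASTContext `Ctx`, and ASTConsumer `Consumer` (placeholder names):
  //
  //   Sema S(PP, Ctx, Consumer, TU_Complete);
  //   ParseAST(S); // declared in clang/Parse/ParseAST.h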
  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// \brief Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// \brief Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);

  /// \brief Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);
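  // Illustrative sketch (editorial addition): Diag() returns a builder, so a
  // diagnostic is assembled with operator<< and emitted when the temporary is
  // destroyed.  The diagnostic ID here is just an example:
  //
  //   S.Diag(OpLoc, diag::err_typecheck_invalid_operands)
  //       << LHS->getType() << RHS->getType()
  //       << LHS->getSourceRange() << RHS->getSourceRange();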
  /// \brief Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// \brief Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// \brief Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// \brief This is used to inform Sema what the current
  /// TemplateParameterDepth is during Parsing.  Currently it is used to pass
  /// on the depth when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD,
                               CapturedRegionKind K);
  void
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       const BlockExpr *blkExpr = nullptr);

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const {
    if (FunctionScopes.empty())
      return nullptr;

    for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
      if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
        continue;
      return FunctionScopes[e];
    }
    return nullptr;
  }

  template <typename ExprT>
  void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead = true) {
    if (!isUnevaluatedContext())
      getCurFunction()->recordUseOfWeak(E, IsRead);
  }

  void PushCompoundScope();
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// \brief Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// \brief Retrieve the current lambda scope info, if any.
  sema::LambdaScopeInfo *getCurLambda();

  /// \brief Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// \brief Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  unsigned deduceWeakPropertyFromType(QualType T) {
    if ((getLangOpts().getGC() != LangOptions::NonGC &&
         T.isObjCGCWeak()) ||
        (getLangOpts().ObjCAutoRefCount &&
         T.getObjCLifetime() == Qualifiers::OCL_Weak))
      return ObjCDeclSpec::DQ_PR_weak;
    return 0;
  }
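  // Illustrative sketch (editorial addition): the Build*Type entry points are
  // how declarator chunks become QualTypes.  For instance, forming `T *` and
  // `T &` for some already-checked type `T` (placeholder names):
  //
  //   QualType PtrTy = S.BuildPointerType(T, Loc, DeclarationName());
  //   QualType RefTy = S.BuildReferenceType(T, /*LValueRef=*/true, Loc,
  //                                         DeclarationName());
  //   // Each call diagnoses invalid constructs (e.g. pointers to references)
  //   // and returns a null QualType on error.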
  /// \brief Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T,
                             MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc,
                                  DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
  TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);
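  // Illustrative sketch (editorial addition): building `int (*)(float)` by
  // hand with the API above (all names are local to the example, and `Ctx`
  // is the ASTContext):
  //
  //   SmallVector<QualType, 1> Params;
  //   Params.push_back(Ctx.FloatTy);
  //   FunctionProtoType::ExtProtoInfo EPI;
  //   QualType FnTy = S.BuildFunctionType(Ctx.IntTy, Params, Loc,
  //                                       DeclarationName(), EPI);
  //   QualType FnPtrTy = S.BuildPointerType(FnTy, Loc, DeclarationName());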
  /// \brief Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Expr *E);
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc,
      bool *MissingExceptionSpecification = nullptr,
      bool *MissingEmptyExceptionSpecification = nullptr,
      bool AllowNoexceptAllMatchWithNoSpec = false,
      bool IsOperatorNew = false);
  bool CheckExceptionSpecSubset(
      const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
      const FunctionProtoType *Superset, SourceLocation SuperLoc,
      const FunctionProtoType *Subset, SourceLocation SubLoc);
  bool CheckParamExceptionSpec(
      const PartialDiagnostic &NoteID, const FunctionProtoType *Target,
      SourceLocation TargetLoc, const FunctionProtoType *Source,
      SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// \brief The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);

  /// \brief Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    bool Suppressed;

    TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char *getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) {
    return E->getSourceRange();
  }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              llvm::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed)
        return;
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, llvm::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser &Diagnoser);

  VisibleModuleSet VisibleModules;
  llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

  Module *CachedFakeTopLevelModule;

public:
  /// \brief Get the module owning an entity.
  Module *getOwningModule(Decl *Entity);

  /// \brief Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

  bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }
  bool hasVisibleMergedDefinition(NamedDecl *Def);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  struct SkipBodyInfo {
    SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
    bool ShouldSkip;
    NamedDecl *Previous;
  };
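  // Illustrative sketch (editorial addition): the variadic RequireCompleteType
  // overload above forwards extra diagnostic arguments through a
  // BoundTypeDiagnoser, with the type itself appended last.  In the simplest
  // form, with a diagnostic that takes only the type:
  //
  //   if (S.RequireCompleteType(Loc, T, diag::err_incomplete_type))
  //     return true; // T was incomplete and a diagnostic was emitted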
  /// List of decls defined in a function prototype. This contains
  /// EnumConstants that incorrectly end up in translation unit scope because
  /// there is no function to pin them on. ActOnFunctionDeclarator reads this
  /// list and patches them into the FunctionDecl.
  std::vector<NamedDecl*> DeclsInPrototypeScope;

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false, bool HasTrailingDot = false,
                         ParsedType ObjectType = ParsedType(),
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                               SourceLocation IILoc,
                               Scope *S,
                               CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool AllowClassTemplates = false);

  /// \brief For compatibility with MSVC, we delay parsing of some default
  /// template type arguments until instantiation time.  Emits a warning and
  /// returns a synthesized DependentNameType that isn't really dependent on
  /// any other template arguments.
  ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                            SourceLocation NameLoc);

  /// \brief Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;
    const IdentifierInfo *Keyword;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword)
      : Kind(NC_Keyword), Keyword(Keyword) { }

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };
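  // Illustrative sketch (editorial addition): the parser switches on the
  // classification that ClassifyName() (declared below) returns:
  //
  //   Sema::NameClassification Classification =
  //       S.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
  //                      /*IsAddressOfOperand=*/false);
  //   switch (Classification.getKind()) {
  //   case Sema::NC_Type:       /* parse a type-specifier */ break;
  //   case Sema::NC_Expression: /* parse an expression    */ break;
  //   default: break;
  //   }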
  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name,
                                    SourceLocation Loc);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  void CheckShadow(Scope *S, VarDecl *D, const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                  TypedefNameDecl *D,
                                  LookupResult &Previous, bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);

  // Returns true if the variable declaration is a redeclaration.
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  void CheckCompleteVariableDeclaration(VarDecl *var);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
  bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration.
  bool CheckFunctionDeclaration(Scope *S,
                                FunctionDecl *NewFD, LookupResult &Previous,
                                bool IsExplicitSpecialization);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                          SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param,
                                 SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param,
                                         SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                            bool TypeMayContainAuto);
  void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
  void ActOnInitializerError(Decl *Dcl);
  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
                                      bool TypeMayContainAuto = true);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }

  /// \brief Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// \brief Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// \brief Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
                                ParmVarDecl * const *End);

  /// \brief Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                              ParmVarDecl * const *End,
                                              QualType ReturnTy,
                                              NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr,
                              SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// \brief Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S,
                              AttributeList *AttrList,
                              SourceLocation SemiLoc);

  /// \brief The parser has processed a module import declaration.
  ///
  /// \param AtLoc The location of the '@' symbol, if any.
  ///
  /// \param ImportLoc The location of the 'import' keyword.
  ///
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                               ModuleIdPath Path);

  /// \brief The parser has processed a module import translated from a
  /// \#include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief Check if a module import may be found in the current context,
  /// and emit an error if not.
  void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc);

  /// \brief Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument
  };

  /// \brief Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             bool NeedDefinition, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  /// \brief Retrieve a suitable printing policy.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// \brief Retrieve a suitable printing policy.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);
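  // Illustrative sketch (editorial addition): the policy above is what keeps
  // types and names rendered consistently in diagnostics and fix-its, e.g.:
  //
  //   std::string Rendered = T.getAsString(S.getPrintingPolicy());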
  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                   DeclSpec &DS);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                   DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation = false);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                    AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                    TagTypeKind NewTag, bool isDefinition,
                                    SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS,
                 IdentifierInfo *Name, SourceLocation NameLoc,
                 AttributeList *Attr, AccessSpecifier AS,
                 SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists,
                 bool &OwnedDecl, bool &IsDependent,
                 SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS,
                                IdentifierInfo *Name, SourceLocation NameLoc,
                                AttributeList *Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S,
                               unsigned TagSpec,
                               TagUseKind TUK,
                               const CXXScopeSpec &SS,
                               IdentifierInfo *Name,
                               SourceLocation TagLoc,
                               SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName,
                 SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle,
                         AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart,
                                   Declarator &D, Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   AttributeList *MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo,
                            RecordDecl *Record, SourceLocation Loc,
                            bool Mutable, Expr *BitfieldWidth,
                            InClassInitStyle InitStyle,
                            SourceLocation TSSL,
                            AccessSpecifier AS, NamedDecl *PrevDecl,
                            Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                  Declarator &D, Expr *BitfieldWidth,
                  tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields,
                   SourceLocation LBrac, SourceLocation RBrac,
                   AttributeList *AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  typedef void *SkippedDefinitionContext;

  /// \brief Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceLocation RBraceLoc);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// \brief Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc,
                                      IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          AttributeList *Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                     SourceLocation RBraceLoc, Decl *EnumDecl,
                     ArrayRef<Decl *> Elements,
                     Scope *S, AttributeList *Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null.
  /// If we're currently in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// \brief Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  ///        the enclosing namespace set of the context, rather than contained
  ///        directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope.  Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                          IdentifierInfo *Platform,
                                          VersionTuple Introduced,
                                          VersionTuple Deprecated,
                                          VersionTuple Obsoleted,
                                          bool IsUnavailable,
                                          StringRef Message,
                                          bool Override,
                                          unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                       TypeVisibilityAttr::VisibilityType Vis,
                                              unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);

  /// \brief Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// \brief Don't merge availability attributes at all.
    AMK_None,
    /// \brief Merge availability attributes for a redeclaration, which
    /// requires an exact match.
    AMK_Redeclaration,
    /// \brief Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override
  };
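  // Illustrative sketch (editorial addition): when attaching an overriding
  // method to its base-class method, attribute merging uses the override
  // semantics rather than the default redeclaration semantics:
  //
  //   S.mergeDeclAttributes(NewMethod, OldMethod, Sema::AMK_Override);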
  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S,
                             FunctionDecl *New,
                             const LookupResult &OldDecls,
                             NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
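  // Illustrative sketch (editorial addition): declaration processing asks
  // CheckOverload whether a new function can join an existing lookup set:
  //
  //   NamedDecl *OldDecl = nullptr;
  //   switch (S.CheckOverload(Scope, NewFD, Previous, OldDecl,
  //                           /*IsForUsingDecl=*/false)) {
  //   case Sema::Ovl_Overload:    /* add NewFD to the overload set */ break;
  //   case Sema::Ovl_Match:       /* treat as a redeclaration      */ break;
  //   case Sema::Ovl_NonFunction: /* conflicts with a non-function */ break;
  //   }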
  /// \brief Checks availability of the function depending on the current
  /// function context. Inside an unavailable function, unavailability is
  /// ignored.
  ///
  /// \returns true if \p FD is unavailable and current context is inside
  /// an available function, false otherwise.
  bool isFunctionConsideredUnavailable(FunctionDecl *FD);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions,
                        bool AllowExplicit,
                        bool InOverloadResolution,
                        bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution,
                           QualType &ConvertedType, bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType,
                               bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType &ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                  QualType FromType, QualType ToType);

  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType,
                              CastKind &Kind,
                              CXXCastPath &BasePath,
                              bool IgnoreBaseAccess);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType,
                                 bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind,
                                    CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsNoReturnConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType,
                                             Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value,
                                              CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);
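  // Illustrative sketch (editorial addition): checking a case label as a
  // converted constant expression of the switch condition's type:
  //
  //   llvm::APSInt CaseVal;
  //   ExprResult Converted = S.CheckConvertedConstantExpression(
  //       CaseExpr, CondType, CaseVal, Sema::CCEK_CaseValue);
  //   if (Converted.isInvalid())
  //     return; // a diagnostic has already been emitted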
  /// \brief Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// \brief Determine whether the specified type is a valid destination
    /// type for this conversion.
    virtual bool match(QualType T) = 0;

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a diagnostic when the expression has incomplete class
    /// type.
    virtual SemaDiagnosticBuilder
    diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    /// \brief Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder
    noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder
    diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder
    noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations,
                        bool Suppress, bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
  };

  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);
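  // Illustrative sketch (editorial addition): callers subclass
  // ICEConvertDiagnoser and hand it to PerformContextualImplicitConversion to
  // convert, e.g., a condition expression to an integral type.  The class
  // name and diagnostic ID below are examples only:
  //
  //   class MyConvertDiagnoser : public Sema::ICEConvertDiagnoser {
  //     // ... override diagnoseNotInt and the other hooks to emit the
  //     // context-appropriate diagnostics, e.g. via S.Diag(Loc, ...) << T;
  //   } Diagnoser(/*AllowScopedEnumerations=*/true, /*Suppress=*/false,
  //               /*SuppressConversion=*/false);
  //   ExprResult Converted =
  //       S.PerformContextualImplicitConversion(Loc, Cond, Diagnoser);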
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the enable_if expressions on the given function. 
Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr.
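/// (A hedged sketch of the delayed-typo lifecycle these entry points serve;
/// the sequencing shown is illustrative, not normative, and SemaRef, TypoName,
/// S, SS, CCC, TDG and TRC stand for caller-provided values:
/// \code
///   TypoExpr *TE = SemaRef.CorrectTypoDelayed(TypoName, LookupKind, S, &SS,
///                                             std::move(CCC), TDG, TRC,
///                                             Sema::CTK_ErrorRecovery);
///   // ...later, once the enclosing expression has been built:
///   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(TE);
/// \endcode
/// After diagnosis or recovery, the associated state is dropped.)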
void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
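// (Illustrative call site only: after a declarator D has produced NewDecl in
// scope S, the parser hands attributes off roughly as
//   Actions.ProcessDeclAttributes(S, NewDecl, D);
// with per-list work done by ProcessDeclAttributeList below. NewDecl, D and S
// are hypothetical names for this sketch, not a prescribed sequence.)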
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in global method pool for /// given selector. If no such method or only one method is found, the function /// returns false; otherwise, it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns the method which best matches the given argument list, /// or nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// \brief A RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
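/// A minimal sketch of the intended pattern (SemaRef and BuildSomething are
/// hypothetical caller-side names, not part of this class):
/// \code
///   FunctionScopeRAII FnScope(SemaRef);  // pops the scope on every exit path
///   if (BuildSomething(SemaRef))
///     FnScope.disable();                 // success: leave popping to callers
/// \endcode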
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, unsigned &Offset, llvm::InlineAsmIdentifierInfo &Info, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, 
Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
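// (These callbacks are driven by the parser as expressions are formed; as an
// illustrative sketch of the handshake, a parsed 'a + b' reaches Sema roughly
// as:
//   ExprResult Sum = Actions.ActOnBinOp(S, PlusLoc, tok::plus, LHS, RHS);
// The exact call sequence is parser-internal; this sketch is not normative.)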
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded.
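///
/// A hedged usage sketch (probing capturability without committing to the
/// capture; SemaRef, Var and Loc stand for caller-provided values):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = SemaRef.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode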
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, 
SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
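// For instance (illustrative): for 'obj.field' where 'obj' has a custom
// operator->, the initial '.' lookup fails; with these extra arguments,
// BuildMemberReferenceExpr can reinvoke ActOnMemberAccess as if 'obj->field'
// had been written, recovering with a fix-it instead of a hard error.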
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
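/// For example, an AltiVec-style paren literal such as
/// \code
///   vector int v = (vector int)(1, 2, 3, 4);
/// \endcode
/// is built through this entry point (example source input, not an API call).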
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred.
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
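/// For example, a constructor such as the following (hypothetical type)
/// is an initializer-list constructor:
/// \code
///   struct Widget {
///     Widget(std::initializer_list<int> Values);
///   };
/// \endcode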
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. 
// Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. 
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior.
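// For illustration, a cast this check might flag looks like (hypothetical names):
//   float F = 1.0f;
//   int &I = reinterpret_cast<int &>(F); // type punning; reading I is undefined behavior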
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if an error occurred, false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
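/// For illustration, a new-expression such as (hypothetical names)
/// \code
///   T *P = new (Buffer) T[N]{1, 2, 3};
/// \endcode
/// supplies the placement arguments, type-id, and initializer that the
/// parameters below correspond to.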
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.
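/// For example, something like (hypothetical operand):
/// \code
///   bool B = __is_lvalue_expr(*Ptr); // expression-trait pseudo-function
/// \endcode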
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
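/// A '__super' nested-name-specifier appears in Microsoft-mode code such as
/// (hypothetical classes):
/// \code
///   struct Derived : Base {
///     void f() { __super::f(); } // names Base::f
///   };
/// \endcode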
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing a /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error messages are emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
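/// For illustration, such a nested-name-specifier might appear as
/// (hypothetical names):
/// \code
///   typename MetaFun::template apply<T1, T2>::type Result;
/// \endcode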
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
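/// For instance, given an lvalue 'V' (hypothetical), the init-capture in
/// \code
///   auto L = [X = V] { return X; };
/// \endcode
/// initializes 'X' after an lvalue-to-rvalue conversion on 'V', since 'X'
/// is not a reference capture.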
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the DeclContext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it records /// the information needed for IR generation to synthesize the real body of /// the function pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable.
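/// For example:
/// \code
///   NSNumber *N = @42;   // boxed numeric literal
///   NSNumber *D = @3.14;
/// \endcode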
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of the base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member /// function overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind 
NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified);
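// Illustrative sketch (added commentary, not part of the original header):
// how the CheckTemplateArgumentKind values map onto source code. A minimal
// example showing all three cases:
//
// \code
// template<typename T, int N> void f(T (&arr)[N]);
//
// int a[4];
// f<int>(a); // 'int' for T is CTAK_Specified
// f(a);      // T = int is CTAK_Deduced;
//            // N = 4 is CTAK_DeducedFromArrayBound
// \endcode

/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.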
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded);
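// Illustrative sketch (added commentary, not part of the original header):
// what an "unexpanded parameter pack" looks like at the source level, here
// in a data member type (diagnosed as UPPC_DataMemberType):
//
// \code
// template<typename ...Ts> struct X {
//   std::tuple<Ts> t;    // error: 'Ts' is unexpanded
//   std::tuple<Ts...> u; // OK: the ellipsis expands the pack
// };
// \endcode

/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.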
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info);
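// Illustrative sketch (added commentary, not part of the original header):
// a call whose deduction yields TDK_Inconsistent, since the two arguments
// produce conflicting values for the same template parameter:
//
// \code
// template<typename T> void f(T, T);
// f(1, 2.0); // T deduced as 'int' from the first argument and as
//            // 'double' from the second: TDK_Inconsistent
// \endcode

/// \brief A function argument from which we performed template argument
/// deduction for a call.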
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache;
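// Illustrative sketch (added commentary, not part of the original header):
// the kind of recursion the ActiveTemplateInstantiations stack bounds. Each
// nested instantiation pushes a record; exceeding the user-configurable
// limit (LangOptions::InstantiationDepth, -ftemplate-depth=N) is an error:
//
// \code
// template<int N> struct S { static const int v = S<N - 1>::v; };
// // No base case: computing S<1024>::v keeps instantiating S<1023>,
// // S<1022>, ... until the instantiation depth limit is reached.
// int x = S<1024>::v;
// \endcode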
/// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief The stack of call expressions undergoing template instantiation. /// /// The top of this stack is used by a fixit instantiating unresolved /// function calls to fix the AST to match the textual change it prints. SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics;
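// Illustrative sketch (added commentary, not part of the original header):
// the usage pattern the RAII type below enforces inside Sema. The names
// PointOfInstantiation and Entity are placeholders for the caller's values:
//
// \code
// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
// if (Inst.isInvalid())
//   return true; // too deeply nested; an error was already emitted
// // ... perform the instantiation; the destructor pops the record ...
// \endcode

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns \c true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.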
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; };
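// Illustrative sketch (added commentary, not part of the original header):
// the kind of deferred work recorded in PendingInstantiations. The
// definition of f<int> is not instantiated at the point of use; a
// (declaration, use-location) pair is queued and later drained by
// PerformPendingInstantiations():
//
// \code
// template<typename T> T f(T x) { return x + 1; }
// int g() { return f(41); } // queues the implicit instantiation of f<int>
// \endcode

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.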
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass);
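// Illustrative sketch (added commentary, not part of the original header):
// the two interpretations that actOnObjCTypeArgsOrProtocolQualifiers must
// distinguish. Identifiers in angle brackets may name type arguments for a
// parameterized class or protocol qualifiers:
//
// \code
// NSArray<NSString *> *names; // NSString * is a type argument
// id<NSCopying> value;        // NSCopying is a protocol qualifier
// \endcode

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.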
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc);
enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};
enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc);
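// Illustrative sketch (not part of the original interface): ActOnPragmaPack
// is invoked for source-level directives of the following shape; the struct
// and the "r1" slot label below are made-up examples.
//
//   #pragma pack(push, r1, 1)              // PPK_Push, Name "r1", Alignment 1
//   struct Header { char Tag; int Len; };  // members laid out with no padding
//   #pragma pack(pop, r1)                  // PPK_Pop, restores the saved state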
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; }
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
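// Illustrative sketch (not part of the original interface): the optimize
// pragma handled by ActOnPragmaOptimize brackets function definitions; the
// function names below are made up.
//
//   #pragma clang optimize off   // ActOnPragmaOptimize(/*On=*/false, Loc)
//   void DebugOnly() {}          // AddRangeBasedOptnone marks this optnone
//   #pragma clang optimize on    // ActOnPragmaOptimize(/*On=*/true, Loc)
//   void HotPath() {}            // optimized normally again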
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind);
public:
/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
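// Illustrative sketch (not part of the original interface): a directive of
// the following shape reaches ActOnOpenMPForDirective; "a" and "tmp" are
// made-up names. Roughly, variables referenced in the region but not named
// in any clause arrive through the VarsWithImplicitDSA map.
//
//   #pragma omp for private(tmp) schedule(static, 4)
//   for (int i = 0; i < n; ++i) {  // loop control variable is private
//     tmp = a[i];
//     a[i] = tmp * tmp;
//   }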
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. 
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, SourceLocation DepLinLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. 
OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. 
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false);
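// Illustrative sketch (not part of the original interface): what the
// promotions above do to some made-up operands.
//
//   short S = 1; long L = 2; float F = 3.0f;
//   auto R = S + L;     // UsualArithmeticConversions: S -> int -> long
//   printf("%f\n", F);  // DefaultArgumentPromotion: F promoted to double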
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,
  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,
  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,
  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,
  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,
  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,
  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,
  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,
  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,
  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,
  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,
  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,
  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,
  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,
  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind);
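// Illustrative sketch (not part of the original interface): C assignments
// (with made-up variables) that map to some of the enumerators above.
//
//   int N; void *P; char **PP; const char **CPP;
//   P = N;     // IntToPointer: accepted as an extension, with a warning
//   N = P;     // PointerToInt: likewise accepted as an extension
//   CPP = PP;  // IncompatibleNestedPointerQualifiers (char ** -> const char **)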
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false);
/// \brief If the LHS type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;
  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr);
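// Illustrative sketch (not part of the original interface): the distinction
// VerifyIntegerConstantExpression enforces, with made-up declarations.
//
//   enum { Width = 8 };
//   extern int RunTime;
//   struct OK  { int F : Width; };    // width is an ICE: accepted
//   struct Bad { int F : RunTime; };  // not an ICE: diagnoseNotICE fires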
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_LastResort, // Lowest priority. Only in effect if
                  // LangOpts.CUDADisableTargetCallChecks is true.
  CFP_Fallback,   // Low priority caller/callee combination
  CFP_Best,       // Preferred caller/callee combination
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<FunctionDecl *> &Matches);
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<DeclAccessPair> &Matches);
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope 
*S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartImpl(CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinMSVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E);

/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}
  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
    : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {}

  QualType Type;
  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
  TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

AvailabilityResult getCurContextAvailability() const;

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// \brief To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % John Cristy % % October 1996 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. 
% % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p, *restrict r; register IndexPacket *restrict threshold_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); 
          channel_sum.red+=r[width-1].red;
          channel_sum.green+=r[width-1].green;
          channel_sum.blue+=r[width-1].blue;
          channel_sum.opacity+=r[width-1].opacity;
          if (image->colorspace == CMYKColorspace)
            channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
              width-1);
          r+=image->columns+width;
        }
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely, each channel value of the image is 'thresholded' so that
%  if it is equal to or less than the given value it is set to zero, while
%  any value greater than the given value is set to its maximum, QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as this operator using
%  LevelImageChannels() with the 'threshold' value for both the black_point
%  and the white_point.
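%
%  For example, a minimal usage sketch (not part of the original
%  documentation; "image" is any valid Image handle) that thresholds all
%  channels at 50% of the quantum range:
%
%      (void) BilevelImage(image,0.5*QuantumRange);
%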
% */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold) { MagickBooleanType status; status=BilevelImageChannel(image,DefaultChannels,threshold); return(status); } MagickExport MagickBooleanType BilevelImageChannel(Image *image, const ChannelType channel,const double threshold) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,RGBColorspace); /* Bilevel threshold image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,(MagickRealType) PixelIntensityToQuantum(image,q) <= threshold ? 0 : QuantumRange); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold ? 0 : QuantumRange); else SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold ? OpaqueOpacity : TransparentOpacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BilevelImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. 
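%
%  For example, a usage sketch (the argument is an illustrative choice, not
%  from the original text): force every pixel below 25% intensity to black
%  with
%
%      (void) BlackThresholdImage(image,"25%");
%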
% % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image,const char *threshold) % MagickBooleanType BlackThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BlackThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=BlackThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickRealType intensity; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } intensity=MagickPixelIntensity(&threshold); if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) TransformImageColorspace(image,RGBColorspace); /* Black threshold image. 
*/
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          if (GetPixelIntensity(image,q) < intensity)
            {
              SetPixelRed(q,0);
              SetPixelGreen(q,0);
              SetPixelBlue(q,0);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,0);
            }
        }
      else
        {
          if (((channel & RedChannel) != 0) &&
              ((MagickRealType) GetPixelRed(q) < threshold.red))
            SetPixelRed(q,0);
          if (((channel & GreenChannel) != 0) &&
              ((MagickRealType) GetPixelGreen(q) < threshold.green))
            SetPixelGreen(q,0);
          if (((channel & BlueChannel) != 0) &&
              ((MagickRealType) GetPixelBlue(q) < threshold.blue))
            SetPixelBlue(q,0);
          if (((channel & OpacityChannel) != 0) &&
              ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
            SetPixelOpacity(q,0);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace) &&
              ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
            SetPixelIndex(indexes+x,0);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
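%
%  For example (an illustrative sketch; clamping only changes pixel data in
%  HDRI builds, where channel values may drift outside the quantum range):
%
%      (void) ClampImageChannel(image,DefaultChannels);
%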
%
*/

static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  return((Quantum) value);
#else
  if (value < 0.0f)
    return(0.0f);
  if (value >= (MagickRealType) QuantumRange)
    return((Quantum) QuantumRange);
  return(value);
#endif
}

MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y   T h r e s h o l d   M a p                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap.
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   T h r e s h o l d   M a p   F i l e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attr,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *)NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *)NULL )
    return(map);
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *)NULL;
       threshold = GetNextXMLTreeTag(threshold) ) {
    attr = GetXMLTreeAttribute(threshold, "map");
    if ( (attr != (char *)NULL) && (LocaleCompare(map_id,attr) == 0) )
      break;
    attr = GetXMLTreeAttribute(threshold, "alias");
    if ( (attr != (char *)NULL) && (LocaleCompare(map_id,attr) == 0) )
      break;
  }
  if ( threshold == (XMLTreeInfo *)NULL ) {
    return(map);
  }
  description = GetXMLTreeChild(threshold,"description");
  if ( description == (XMLTreeInfo *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingElement", "<description>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    return(map);
  }
  levels = GetXMLTreeChild(threshold,"levels");
  if ( levels == (XMLTreeInfo *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingElement", "<levels>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    return(map);
  }
  /* The map has been found -- Allocate a Threshold Map to return */
  map = (ThresholdMap *)AcquireMagickMemory(sizeof(ThresholdMap));
  if ( map == (ThresholdMap *)NULL )
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id = (char *)NULL;
  map->description = (char *)NULL;
  map->levels = (ssize_t *) NULL;
  /* Assign Basic Attributes */
  attr = GetXMLTreeAttribute(threshold, "map");
  if ( attr != (char *)NULL )
    map->map_id = ConstantString(attr);
  content = GetXMLTreeContent(description);
  if ( content != (char *)NULL )
    map->description = ConstantString(content);
  attr = GetXMLTreeAttribute(levels, "width");
  if ( attr == (char *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingAttribute", "<levels width>, map \"%s\",
      map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  map->width = StringToUnsignedLong(attr);
  if ( map->width == 0 ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  attr = GetXMLTreeAttribute(levels, "height");
  if ( attr == (char *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  map->height = StringToUnsignedLong(attr);
  if ( map->height == 0 ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  attr = GetXMLTreeAttribute(levels, "divisor");
  if ( attr == (char *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  map->divisor = (ssize_t) StringToLong(attr);
  if ( map->divisor < 2 ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  /* Allocate threshold levels array */
  content = GetXMLTreeContent(levels);
  if ( content == (char *)NULL ) {
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "XmlMissingContent", "<levels>, map \"%s\"", map_id);
    thresholds = DestroyXMLTree(thresholds);
    map = DestroyThresholdMap(map);
    return(map);
  }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if ( map->levels == (ssize_t *)NULL )
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  { /* parse levels into integer array */
    ssize_t
      i;

    char
      *p;

    for( i=0; i< (ssize_t) (map->width*map->height); i++) {
      map->levels[i] = (ssize_t)strtol(content, &p, 10);
      if ( p == content ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
        thresholds = DestroyXMLTree(thresholds);
        map = DestroyThresholdMap(map);
        return(map);
      }
      if ( map->levels[i] < 0 || map->levels[i] > map->divisor ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds = DestroyXMLTree(thresholds);
        map = DestroyThresholdMap(map);
        return(map);
      }
      content = p;
    }
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  }
  thresholds = DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t   T h r e s h o l d   M a p                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for
%  a map matching the given name or alias.
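%
%  For example, a caller might look up a map and release it again (a sketch;
%  "o8x8" is one of the map names defined in thresholds.xml):
%
%      map=GetThresholdMap("o8x8",exception);
%      if (map != (ThresholdMap *) NULL)
%        map=DestroyThresholdMap(map);
%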
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  map=(ThresholdMap *)NULL;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  while (( option=(const StringInfo *) GetNextValueInLinkedList(options) )
          != (const StringInfo *) NULL && map == (ThresholdMap *)NULL )
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
  options=DestroyConfigureOptions(options);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L i s t   T h r e s h o l d   M a p   F i l e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,
    *threshold,
    *description;

  const char
    *map,
    *alias,
    *content;

  assert( xml != (char *)NULL );
  assert( file != (FILE *)NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *)NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *)NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<map>");
      thresholds=DestroyXMLTree(thresholds);
      return(MagickFalse);
    }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *)NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map);
      thresholds=DestroyXMLTree(thresholds);
      return(MagickFalse);
    }
    content=GetXMLTreeContent(description);
    if ( content == (char *)NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<description>, map \"%s\"", map);
      thresholds=DestroyXMLTree(thresholds);
      return(MagickFalse);
    }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ?
      alias : "", content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t   T h r e s h o l d   M a p s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickFalse;
  if ( file == (FILE *)NULL )
    file = stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n   Threshold Maps for Ordered Dither Operations\n");
  while ( ( option=(const StringInfo *) GetNextValueInLinkedList(options) )
          != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPATH: %s\n\n",GetStringInfoPath(option));
    status|=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d   D i t h e r   I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
%
%  WARNING: This function is deprecated, and is now just a call to
%  the more powerful OrderedPosterizeImage() function.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image)
%      MagickBooleanType OrderedDitherImageChannel(Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  MagickBooleanType
    status;

  status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception);
  return(status);
}

MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Call the augmented function OrderedPosterizeImage().
  */
  status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d   P o s t e r i z e   I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedPosterizeImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
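%
%  For example, a usage sketch (the argument string is an illustrative
%  choice; see the argument examples below for how it is interpreted):
%
%      (void) OrderedPosterizeImageChannel(image,DefaultChannels,"o3x3,6",
%        &image->exception);
%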
%
%  The format of the OrderedPosterizeImage method is:
%
%      MagickBooleanType OrderedPosterizeImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%      MagickBooleanType OrderedPosterizeImageChannel(Image *image,
%        const ChannelType channel,const char *threshold_map,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception);
  return(status);
}

MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    char
      token[MaxTextExtent];

    register const char
      *p;

    p=(char *)threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
           (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
           (*p != '\0')) {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map] = *p;
      p++;
    }
    token[p-threshold_map] = '\0';
    map = GetThresholdMap(token, exception);
    if ( map == (ThresholdMap *)NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  }
  /* Set channel levels from extra comma separated arguments
     Default to 2, the single value given, or individual channel values
  */
#if 1
  { /* parse directly as a comma separated list of integers */
    char *p;

    p = strchr((char *) threshold_map,',');
    if ( p != (char *)NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned int) strtoul(p, &p, 10);
    else
      levels.index = 2;

    levels.red     = ((channel & RedChannel  )   != 0) ? levels.index : 0;
    levels.green   = ((channel & GreenChannel)   != 0) ? levels.index : 0;
    levels.blue    = ((channel & BlueChannel)    != 0) ?
      levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index   = ((channel & IndexChannel) != 0 &&
      (image->colorspace == CMYKColorspace)) ? levels.index : 0;

    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' ) {
      p=strchr((char *) threshold_map,',');
      p++;
      if ((channel & RedChannel) != 0)
        levels.red = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & GreenChannel) != 0)
        levels.green = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & BlueChannel) != 0)
        levels.blue = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace)
        levels.index=(unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & OpacityChannel) != 0)
        levels.opacity = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
    }
  }
#else
  /* Parse level values as a geometry */
  /* This is difficult!
   * How to map GeometryInfo structure elements into
   * LongPixelPacket structure elements, but according to channel?
   * Note the channels list may skip elements!!!!
   * EG  -channel BA  -ordered-dither map,2,3
   * will need to map  g.rho -> l.blue, and g.sigma -> l.opacity
   * A simpler way is needed, probably converting geometry to a temporary
   * array, then using channel to advance the index into ssize_t pixel packet.
   */
#endif
#if 0
  printf("DEBUG levels  r=%u g=%u b=%u a=%u i=%u\n",
    levels.red, levels.green, levels.blue, levels.opacity, levels.index);
#endif

  { /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of pseudo-level divisions added between color levels */
    d = map->divisor-1;

    /* reduce levels to levels - 1 */
    levels.red     = levels.red     ? levels.red-1     : 0;
    levels.green   = levels.green   ? levels.green-1   : 0;
    levels.blue    = levels.blue    ? levels.blue-1    : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index   = levels.index   ? levels.index-1   : 0;

    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel.
          This must be an integer from 1 to map->divisor-1.
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];

        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this color's pseudo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and the divisor have already had 1
              subtracted.
              Opacity is inverted so 'off' represents transparent.
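              Worked example (added for illustration; it assumes the "o2x2"
              map, whose divisor is 5, so d = 4):  a request for 3 levels
              leaves levels.red = 2 after the subtraction above, hence
              t = floor(v*(2*4+1)) = floor(v*9) for the normalized intensity
              v = QuantumScale*red.  For v = 0.7: t = 6, then l = t/d = 1 and
              t = t-l*d = 2, so the pixel is pushed up to level 2
              (QuantumRange) in the tile cells whose map threshold is <= 2,
              and stays at level 1 (QuantumRange/2) elsewhere -- the
              intermediate intensity is thus dithered between the two
              nearest color levels.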
        */
        if (levels.red) {
          t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
          l = t/d;  t = t-l*d;
          SetPixelRed(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red)));
        }
        if (levels.green) {
          t = (ssize_t) (QuantumScale*GetPixelGreen(q)*
            (levels.green*d+1));
          l = t/d;  t = t-l*d;
          SetPixelGreen(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green)));
        }
        if (levels.blue) {
          t = (ssize_t) (QuantumScale*GetPixelBlue(q)*
            (levels.blue*d+1));
          l = t/d;  t = t-l*d;
          SetPixelBlue(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue)));
        }
        if (levels.opacity) {
          t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
            (levels.opacity*d+1));
          l = t/d;  t = t-l*d;
          SetPixelOpacity(q,ClampToQuantum((MagickRealType)
            ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
            levels.opacity)));
        }
        if (levels.index) {
          t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
            (levels.index*d+1));
          l = t/d;  t = t-l*d;
          SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
            (t >= threshold))*(MagickRealType) QuantumRange/levels.index)));
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_OrderedPosterizeImageChannel)
#endif
          proceed=SetImageProgress(image,DitherImageTag,progress++,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e   I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() sets each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer); otherwise the pixel value
%  remains unchanged.
%
%  The format of the PerceptibleImageChannel method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/

static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ?
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon) { MagickBooleanType status; status=PerceptibleImageChannel(image,DefaultChannels,epsilon); return(status); } MagickExport MagickBooleanType PerceptibleImageChannel(Image *image, const ChannelType channel,const double epsilon) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); q++; } return(SyncImage(image)); } /* Perceptible image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x), epsilon)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PerceptibleImageChannel) #endif proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. 
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
%      is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
*/ status=MagickTrue; progress=0; if (channel == CompositeChannels) { if (AcquireImageColormap(image,2) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); random_info=AcquireRandomInfoThreadSet(); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #endif image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { IndexPacket index; MagickRealType intensity; intensity=(MagickRealType) PixelIntensityToQuantum(image,q); if (intensity < min_threshold) threshold.index=min_threshold; else if (intensity > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType)(QuantumRange* GetPseudoRandomValue(random_info[id])); index=(IndexPacket) (intensity <= threshold.index ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } random_info=AcquireRandomInfoThreadSet(); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #endif image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if ((MagickRealType) GetPixelRed(q) < min_threshold) threshold.red=min_threshold; else if ((MagickRealType) GetPixelRed(q) > max_threshold) threshold.red=max_threshold; else threshold.red=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & GreenChannel) != 0) { if ((MagickRealType) GetPixelGreen(q) < min_threshold) threshold.green=min_threshold; else if ((MagickRealType) GetPixelGreen(q) > max_threshold) threshold.green=max_threshold; else threshold.green=(MagickRealType) (QuantumRange* 
GetPseudoRandomValue(random_info[id])); } if ((channel & BlueChannel) != 0) { if ((MagickRealType) GetPixelBlue(q) < min_threshold) threshold.blue=min_threshold; else if ((MagickRealType) GetPixelBlue(q) > max_threshold) threshold.blue=max_threshold; else threshold.blue=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & OpacityChannel) != 0) { if ((MagickRealType) GetPixelOpacity(q) < min_threshold) threshold.opacity=min_threshold; else if ((MagickRealType) GetPixelOpacity(q) > max_threshold) threshold.opacity=max_threshold; else threshold.opacity=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold) threshold.index=min_threshold; else if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold.opacity ? 0 : QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold.index ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold) % MagickBooleanType WhiteThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=WhiteThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickRealType intensity; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); flags=ParseGeometry(thresholds,&geometry_info); GetMagickPixelPacket(image,&threshold); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } intensity=MagickPixelIntensity(&threshold); if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) TransformImageColorspace(image,RGBColorspace); /* White threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (GetPixelIntensity(image,q) > intensity) { SetPixelRed(q,QuantumRange); SetPixelGreen(q,QuantumRange); SetPixelBlue(q,QuantumRange); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,QuantumRange); } } else { if (((channel & RedChannel) != 0) && ((MagickRealType) GetPixelRed(q) > threshold.red)) SetPixelRed(q,QuantumRange); if (((channel & GreenChannel) != 0) && ((MagickRealType) GetPixelGreen(q) > threshold.green)) SetPixelGreen(q,QuantumRange); if (((channel & BlueChannel) != 0) && ((MagickRealType) GetPixelBlue(q) > threshold.blue)) SetPixelBlue(q,QuantumRange); if (((channel & OpacityChannel) != 0) && ((MagickRealType) GetPixelOpacity(q) > threshold.opacity)) SetPixelOpacity(q,QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index) SetPixelIndex(indexes+x,QuantumRange); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WhiteThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
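The threshold functions above all follow the same MagickCore conventions: a geometry string selects the thresholds, and the Channel variants restrict the operation to selected channels. A minimal usage sketch follows, assuming ImageMagick 6 MagickCore and hypothetical file names input.png/output.png; the "80%" and "20%,80%" strings are illustrative values, not defaults:

#include <stdio.h>
#include <string.h>
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;

  MagickCoreGenesis((char *) "demo",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) strcpy(image_info->filename,"input.png");   /* hypothetical input */
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* force bright pixels to white, then randomly threshold the rest */
      (void) WhiteThresholdImage(image,"80%");
      (void) RandomThresholdImageChannel(image,DefaultChannels,"20%,80%",
        exception);
      (void) strcpy(image->filename,"output.png");   /* hypothetical output */
      (void) WriteImage(image_info,image);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}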
GB_subref_slice.c
//------------------------------------------------------------------------------
// GB_subref_slice: construct coarse/fine tasks for C = A(I,J)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Determine the tasks for computing C=A(I,J).  The matrix C has Cnvec vectors,
// and these are divided into coarse and fine tasks.  A coarse task will
// compute one or more whole vectors of C.  A fine task operates on a slice of
// a single vector of C.  The slice can be done by the # of entries in the
// corresponding vector of A, or by the list of indices I, depending on how the
// work is done for that method.

// The (kC)th vector will access A(imin:imax,kA) in Ai,Ax [pA:pA_end-1], where
// pA = Ap_start [kC] and pA_end = Ap_end [kC].

// The computation of each vector C(:,kC) = A(I,kA) is done using one of 12
// different cases, depending on the vector, as determined by GB_subref_method.
// Not all vectors in C are computed using the same method.

// Note that J can have duplicates.  kC is unique (0:Cnvec-1) but the
// corresponding vector kA in A may repeat, if J has duplicates.  Duplicates in
// J are not exploited, since the coarse/fine tasks are constructed by slicing
// the list of vectors Ch of size Cnvec, not the vectors of A.

// Compare this function with GB_ewise_slice, which constructs coarse/fine
// tasks for the eWise operations (C=A+B, C=A.*B, and C<M>=Z).

#define GB_FREE_WORKSPACE                       \
{                                               \
    GB_WERK_POP (Coarse, int64_t) ;             \
    GB_FREE_WORK (&Cwork, Cwork_size) ;         \
}

#define GB_FREE_ALL                             \
{                                               \
    GB_FREE_WORKSPACE ;                         \
    GB_FREE_WORK (&TaskList, TaskList_size) ;   \
    GB_FREE_WORK (&Mark, Mark_size) ;           \
    GB_FREE_WORK (&Inext, Inext_size) ;         \
}

#include "GB_subref.h"

GrB_Info GB_subref_slice    // phase 1 of GB_subref
(
    // output:
    GB_task_struct **p_TaskList,    // array of structs
    size_t *p_TaskList_size,        // size of TaskList
    int *p_ntasks,                  // # of tasks constructed
    int *p_nthreads,                // # of threads for subref operation
    bool *p_post_sort,              // true if a final post-sort is needed
    int64_t *restrict *p_Mark,      // for I inverse, if needed; size avlen
    size_t *p_Mark_size,
    int64_t *restrict *p_Inext,     // for I inverse, if needed; size nI
    size_t *p_Inext_size,
    int64_t *p_nduplicates,         // # of duplicates, if I inverse computed
    // from phase0:
    const int64_t *restrict Ap_start,   // location of A(imin:imax,kA)
    const int64_t *restrict Ap_end,
    const int64_t Cnvec,            // # of vectors of C
    const bool need_qsort,          // true if C must be sorted
    const int Ikind,                // GB_ALL, GB_RANGE, GB_STRIDE or GB_LIST
    const int64_t nI,               // length of I
    const int64_t Icolon [3],       // for GB_RANGE and GB_STRIDE
    // original input:
    const int64_t avlen,            // A->vlen
    const int64_t anz,              // nnz (A)
    const GrB_Index *I,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_TaskList != NULL) ;
    ASSERT (p_TaskList_size != NULL) ;
    ASSERT (p_ntasks != NULL) ;
    ASSERT (p_nthreads != NULL) ;
    ASSERT (p_post_sort != NULL) ;
    ASSERT (p_Mark != NULL) ;
    ASSERT (p_Inext != NULL) ;
    ASSERT (p_nduplicates != NULL) ;

    ASSERT ((Cnvec > 0) == (Ap_start != NULL)) ;
    ASSERT ((Cnvec > 0) == (Ap_end != NULL)) ;

    (*p_TaskList) = NULL ;
    (*p_TaskList_size) = 0 ;
    (*p_Mark ) = NULL ;
    (*p_Inext ) = NULL ;

    int64_t *restrict
Mark = NULL ; size_t Mark_size = 0 ; int64_t *restrict Inext = NULL ; size_t Inext_size = 0 ; int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ; GB_WERK_DECLARE (Coarse, int64_t) ; // size ntasks1+1 int ntasks1 = 0 ; GrB_Info info ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // allocate the initial TaskList //-------------------------------------------------------------------------- // Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow // later, if needed. Usually, 64*nthreads_max is enough, but in a few cases // fine tasks can cause this number to be exceeded. If that occurs, // TaskList is reallocated. // When the mask is present, it is often fastest to break the work up // into tasks, even when nthreads_max is 1. GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ; int max_ntasks = 0 ; int ntasks0 = (nthreads_max == 1) ? 1 : (32 * nthreads_max) ; GB_REALLOC_TASK_WORK (TaskList, ntasks0, max_ntasks) ; //-------------------------------------------------------------------------- // determine if I_inverse can be constructed //-------------------------------------------------------------------------- // I_inverse_ok is true if I might be inverted. If false, then I will not // be inverted. I can be inverted only if the workspace for the inverse // does not exceed nnz(A). Note that if I was provided on input as an // explicit list, but consists of a contiguous range imin:imax, then Ikind // is now GB_LIST and the list I is ignored. // If I_inverse_ok is true, the inverse of I might still not be needed. // need_I_inverse becomes true if any C(:,kC) = A (I,kA) computation // requires I inverse. int64_t I_inverse_limit = GB_IMAX (4096, anz) ; bool I_inverse_ok = (Ikind == GB_LIST && ((nI > avlen / 256) || ((nI + avlen) < I_inverse_limit))) ; bool need_I_inverse = false ; bool post_sort = false ; int64_t iinc = Icolon [GxB_INC] ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- Cwork = GB_MALLOC_WORK (Cnvec+1, int64_t, &Cwork_size) ; if (Cwork == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // estimate the work required for each vector of C //-------------------------------------------------------------------------- int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ; int64_t kC ; #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) \ reduction(||:need_I_inverse) for (kC = 0 ; kC < Cnvec ; kC++) { // jC is the (kC)th vector of C = A(I,J) // int64_t jC = GBH (Ch, kC) ; // C(:,kC) = A(I,kA) will be constructed int64_t pA = Ap_start [kC] ; int64_t pA_end = Ap_end [kC] ; int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j)) int64_t work ; // amount of work for C(:,kC) = A (I,kA) bool this_needs_I_inverse ; // true if this vector needs I inverse // ndupl in I not yet known; it is found when I is inverted. For // now, assume I has no duplicate entries. All that is needed for now // is the work required for each C(:,kC), and whether or not I inverse // must be created. 
The # of duplicates has no impact on the I inverse // decision, and a minor effect on the work (which is ignored). GB_subref_method (&work, &this_needs_I_inverse, alen, avlen, Ikind, nI, I_inverse_ok, need_qsort, iinc, 0) ; // log the result need_I_inverse = need_I_inverse || this_needs_I_inverse ; Cwork [kC] = work ; } //-------------------------------------------------------------------------- // replace Cwork with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ; double cwork = (double) Cwork [Cnvec] ; //-------------------------------------------------------------------------- // determine # of threads and tasks to use for C=A(I,J) //-------------------------------------------------------------------------- int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ; ntasks1 = (nthreads == 1) ? 1 : (32 * nthreads) ; double target_task_size = cwork / (double) (ntasks1) ; target_task_size = GB_IMAX (target_task_size, chunk) ; //-------------------------------------------------------------------------- // invert I if required //-------------------------------------------------------------------------- int64_t ndupl = 0 ; if (need_I_inverse) { GB_OK (GB_I_inverse (I, nI, avlen, &Mark, &Mark_size, &Inext, &Inext_size, &ndupl, Context)) ; ASSERT (Mark != NULL) ; ASSERT (Inext != NULL) ; } //-------------------------------------------------------------------------- // check for quick return for a single task //-------------------------------------------------------------------------- if (Cnvec == 0 || ntasks1 == 1) { // construct a single coarse task that computes all of C TaskList [0].kfirst = 0 ; TaskList [0].klast = Cnvec-1 ; // free workspace and return result GB_FREE_WORKSPACE ; (*p_TaskList ) = TaskList ; (*p_TaskList_size) = TaskList_size ; (*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ; (*p_nthreads ) = 1 ; (*p_post_sort ) = false ; (*p_Mark ) = Mark ; (*p_Mark_size ) = Mark_size ; (*p_Inext ) = Inext ; (*p_Inext_size ) = Inext_size ; (*p_nduplicates) = ndupl ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // slice the work into coarse tasks //-------------------------------------------------------------------------- GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ; if (Coarse == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ; //-------------------------------------------------------------------------- // construct all tasks, both coarse and fine //-------------------------------------------------------------------------- int ntasks = 0 ; for (int t = 0 ; t < ntasks1 ; t++) { //---------------------------------------------------------------------- // coarse task computes C (:,k:klast) //---------------------------------------------------------------------- int64_t k = Coarse [t] ; int64_t klast = Coarse [t+1] - 1 ; if (k >= Cnvec) { //------------------------------------------------------------------ // all tasks have been constructed //------------------------------------------------------------------ break ; } else if (k < klast) { //------------------------------------------------------------------ // coarse task has 2 or more vectors //------------------------------------------------------------------ // This is a non-empty coarse-grain task that does two or more // entire vectors of C, vectors k:klast, inclusive. 
GB_REALLOC_TASK_WORK (TaskList, ntasks + 1, max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = klast ; ntasks++ ; } else { //------------------------------------------------------------------ // coarse task has 0 or 1 vectors //------------------------------------------------------------------ // As a coarse-grain task, this task is empty or does a single // vector, k. Vector k must be removed from the work done by this // and any other coarse-grain task, and split into one or more // fine-grain tasks. for (int tt = t ; tt < ntasks1 ; tt++) { // remove k from the initial slice tt if (Coarse [tt] == k) { // remove k from task tt Coarse [tt] = k+1 ; } else { // break, k not in task tt break ; } } //------------------------------------------------------------------ // determine the # of fine-grain tasks to create for vector k //------------------------------------------------------------------ double ckwork = Cwork [k+1] - Cwork [k] ; int nfine = ckwork / target_task_size ; nfine = GB_IMAX (nfine, 1) ; // make the TaskList bigger, if needed GB_REALLOC_TASK_WORK (TaskList, ntasks + nfine, max_ntasks) ; //------------------------------------------------------------------ // create the fine-grain tasks //------------------------------------------------------------------ if (nfine == 1) { //-------------------------------------------------------------- // this is a single coarse task for all of vector k //-------------------------------------------------------------- TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = k ; ntasks++ ; } else { //-------------------------------------------------------------- // slice vector k into nfine fine tasks //-------------------------------------------------------------- // There are two kinds of fine tasks, depending on the method // used to compute C(:,kC) = A(I,kA). If the method iterates // across all entries in A(imin:imax,kA), then those entries // are sliced (of size alen). Three methods (1, 2, and 6) // iterate across all entries in I instead (of size nI). int64_t pA = Ap_start [k] ; int64_t pA_end = Ap_end [k] ; int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j)) int method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI, I_inverse_ok, need_qsort, iinc, ndupl) ; if (method == 10) { // multiple fine tasks operate on a single vector C(:,kC) // using method 10, and so a post-sort is needed. post_sort = true ; } if (method == 1 || method == 2 || method == 6) { // slice I for this task nfine = GB_IMIN (nfine, nI) ; nfine = GB_IMAX (nfine, 1) ; for (int tfine = 0 ; tfine < nfine ; tfine++) { // flag this as a fine task, and record the method. // Methods 1, 2, and 6 slice I, not A(:,kA) TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -method ; // do not partition A(:,kA) TaskList [ntasks].pA = pA ; TaskList [ntasks].pA_end = pA_end ; // partition I for this task GB_PARTITION (TaskList [ntasks].pB, TaskList [ntasks].pB_end, nI, tfine, nfine) ; // unused TaskList [ntasks].pM = -1 ; TaskList [ntasks].pM_end = -1 ; // no post sort TaskList [ntasks].len = 0 ; ntasks++ ; } } else { // slice A(:,kA) for this task nfine = GB_IMIN (nfine, alen) ; nfine = GB_IMAX (nfine, 1) ; bool reverse = (method == 8 || method == 9) ; for (int tfine = 0 ; tfine < nfine ; tfine++) { // flag this as a fine task, and record the method. // These methods slice A(:,kA). Methods 8 and 9 // must do so in reverse order. 
TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -method ; // partition the items for this task GB_PARTITION (TaskList [ntasks].pA, TaskList [ntasks].pA_end, alen, (reverse) ? (nfine-tfine-1) : tfine, nfine) ; TaskList [ntasks].pA += pA ; TaskList [ntasks].pA_end += pA ; // do not partition I TaskList [ntasks].pB = 0 ; TaskList [ntasks].pB_end = nI ; // unused TaskList [ntasks].pM = -1 ; TaskList [ntasks].pM_end = -1 ; // flag the task that does the post sort TaskList [ntasks].len = (tfine == 0 && method == 10) ; ntasks++ ; } } } } } ASSERT (ntasks > 0) ; //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; (*p_TaskList ) = TaskList ; (*p_TaskList_size) = TaskList_size ; (*p_ntasks ) = ntasks ; (*p_nthreads ) = nthreads ; (*p_post_sort ) = post_sort ; (*p_Mark ) = Mark ; (*p_Mark_size ) = Mark_size ; (*p_Inext ) = Inext ; (*p_Inext_size ) = Inext_size ; (*p_nduplicates) = ndupl ; return (GrB_SUCCESS) ; }
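Both the coarse slice (GB_pslice over the cumulative Cwork) and the fine slice (GB_PARTITION over alen or nI) reduce to the same primitive: splitting a range of n items nearly evenly across ntasks. A standalone sketch of that even partition, assuming the usual (tid*n)/ntasks formula rather than GraphBLAS's exact macro definition:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* give task tid the half-open range [k1,k2) of n items, split as evenly
   as possible across ntasks; task sizes differ by at most one item */
static void partition (int64_t *k1, int64_t *k2, int64_t n, int tid, int ntasks)
{
    (*k1) = (tid * n) / ntasks ;
    (*k2) = ((tid + 1) * n) / ntasks ;
}

int main (void)
{
    int64_t k1, k2 ;
    int ntasks = 4 ;
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        partition (&k1, &k2, 10, tid, ntasks) ;
        printf ("task %d: items %" PRId64 " to %" PRId64 "\n", tid, k1, k2-1) ;
    }
    return (0) ;
}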
selu_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: hhchen@openailab.com
 */

#include "selu_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

int ref_selu_fp32(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
                  int num_thread)
{
    float alpha = selu_param->alpha;
    float lambda = selu_param->lambda;
    float alpha_lambda = alpha * lambda;

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        /* per-channel views into the tensors; the inner index is j so it
           does not shadow the channel index i */
        float* input_data = ( float* )input_tensor->data + i * chan_size;
        float* output_data = ( float* )output_tensor->data + i * chan_size;
        for (int j = 0; j < chan_size; j++)
        {
            if (input_data[j] < 0.f)
                output_data[j] = (exp(input_data[j]) - 1.f) * alpha_lambda;
            else
                output_data[j] = input_data[j] * lambda;
        }
    }

    return 0;
}

int ref_selu_uint8(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
                   int num_thread)
{
    /* dequant */
    uint8_t* input_uint8 = input_tensor->data;
    uint8_t* output_uint8 = output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    float* input_data = ( float* )sys_malloc(input_size * sizeof(float));
    float* output_data = ( float* )sys_malloc(output_size * sizeof(float));

    for (int i = 0; i < input_size; i++)
    {
        input_data[i] = (( float )input_uint8[i] - ( float )input_zero) * input_scale;
    }

    float alpha = selu_param->alpha;
    float lambda = selu_param->lambda;
    float alpha_lambda = alpha * lambda;

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        /* operate on the dequantized buffers, not the raw tensor data: the
           original loop redeclared input_data/output_data from tensor->data,
           which reinterpreted the quantized uint8 bytes as floats */
        float* in = input_data + i * chan_size;
        float* out = output_data + i * chan_size;
        for (int j = 0; j < chan_size; j++)
        {
            if (in[j] < 0.f)
                out[j] = (exp(in[j]) - 1.f) * alpha_lambda;
            else
                out[j] = in[j] * lambda;
        }
    }

    /* quant */
    for (int i = 0; i < output_size; i++)
    {
        int udata = round(output_data[i] / output_scale +
output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(input_data); sys_free(output_data); return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct selu_param* selu_param = ( struct selu_param* )ir_node->op.param_mem; int num_thread = exec_graph->num_thread; int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_selu_fp32(output_tensor, input_tensor, selu_param, num_thread); else if(input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_selu_uint8(output_tensor, input_tensor, selu_param, num_thread); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { struct node* ir_node = exec_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW) return 0; return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_selu_ref_op() { return register_builtin_node_ops(OP_SELU, &hcl_node_ops); } int unregister_selu_ref_op() { return unregister_builtin_node_ops(OP_SELU, &hcl_node_ops); }
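The reference kernels above apply selu(x) = lambda*x for x >= 0 and lambda*alpha*(exp(x)-1) for x < 0. A self-contained check of that formula, assuming the commonly used constants alpha ~ 1.6733 and lambda ~ 1.0507 (at runtime the actual values come from selu_param):

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* assumed constants; Tengine reads these from selu_param */
    const float alpha = 1.67326324f;
    const float lambda = 1.05070098f;

    float x[4] = {-2.f, -0.5f, 0.f, 1.5f};
    for (int i = 0; i < 4; i++)
    {
        /* same branch structure as ref_selu_fp32 above */
        float y = (x[i] < 0.f) ? (expf(x[i]) - 1.f) * alpha * lambda
                               : x[i] * lambda;
        printf("selu(% .2f) = % .6f\n", x[i], y);
    }
    return 0;
}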
GB_binop__bshift_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bshift_uint32 // A.*B function (eWiseMult): GB_AemultB__bshift_uint32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bshift_uint32 // C+=b function (dense accum): GB_Cdense_accumb__bshift_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint32 // C=scalar+B GB_bind1st__bshift_uint32 // C=scalar+B' GB_bind1st_tran__bshift_uint32 // C=A+scalar GB_bind2nd__bshift_uint32 // C=A'+scalar GB_bind2nd_tran__bshift_uint32 // C type: uint32_t // A type: uint32_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint32 (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_bitshift_uint32 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bshift_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bshift_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bshift_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bshift_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bshift_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_uint32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bshift_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = GB_bitshift_uint32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint32 (x, aij) ; \ } GrB_Info GB_bind1st_tran__bshift_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint32 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__bshift_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
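All of the kernels above funnel into GB_bitshift_uint32 (x, y), where a positive y shifts left, a negative y shifts right, and a shift of 32 or more bits in either direction yields zero. A minimal sketch of that semantic, written independently of the internal GraphBLAS definition (which remains the authoritative reference):

#include <stdio.h>
#include <stdint.h>

/* sketch of GxB_BSHIFT semantics for uint32, assuming the behavior
   documented for GraphBLAS: y > 0 shifts left, y < 0 shifts right,
   and |y| >= 32 gives 0 */
static uint32_t bitshift_uint32 (uint32_t x, int8_t y)
{
    if (y == 0) return (x) ;
    if (y >= 32 || y <= -32) return (0) ;
    return ((y > 0) ? (x << y) : (x >> (-y))) ;
}

int main (void)
{
    printf ("%u\n", bitshift_uint32 (1, 4)) ;       /* 16 */
    printf ("%u\n", bitshift_uint32 (256, -4)) ;    /* 16 */
    printf ("%u\n", bitshift_uint32 (7, 40)) ;      /* 0  */
    return (0) ;
}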
cones.c
#include "cones.h" #ifdef LAPACK_LIB_FOUND #include <cblas.h> #include <lapacke.h> #endif void projectsdc(double * X, int n, Work * w); /* in place projection (with branches) */ void projCone(double *x, Cone * k, Work * w, int iter) { int i; int count; /* project onto positive orthant */ for(i = k->f; i < k->f+k->l; ++i) { if(x[i] < 0.0) x[i] = 0.0; //x[i] = (x[i] < 0.0) ? 0.0 : x[i]; } count = k->l+k->f; /* project onto SOC */ for(i = 0; i < k->qsize; ++i) { double v1 = x[count]; double s = calcNorm(&(x[count+1]),k->q[i]-1); double alpha = (s + v1)/2.0; if(s <= v1) { /* do nothing */ } else if (s <= - v1) { memset(&(x[count]), 0, k->q[i]*sizeof(double)); } else { x[count] = alpha; scaleArray(&(x[count+1]), alpha/s, k->q[i]-1); //cblas_dscal(k->q[i]-1, alpha/s, &(x[count+1]),1); } count += k->q[i]; } #ifdef LAPACK_LIB_FOUND /* project onto PSD cone */ for (i=0; i < k->ssize; ++i){ projectsdc(&(x[count]),k->s[i],w); count += (k->s[i])*(k->s[i]); } #else if(k->ssize > 0){ coneOS_printf("WARNING: solving SDP, no lapack library specified in makefile!\n"); coneOS_printf("ConeOS will return a wrong answer!\n"); } #endif /* * exponential cone is not self dual, if s \in K * then y \in K^* and so if K is the primal cone * here we project onto K^*, via Moreau */ scaleArray(&(x[count]), -1, 3*k->ep); // x = -x; double r,s,t; int idx; #pragma omp parallel for private(r,s,t,idx) for (i=0; i < k->ep; ++i) { idx = count + 3*i; r = x[idx]; s = x[idx+1]; t = x[idx+2]; projExpCone(&(x[idx])); x[idx] -= r; x[idx+1] -= s; x[idx+2] -= t; } count += 3*k->ep; // exponential cone: #pragma omp parallel for for (i=0; i < k->ed; ++i) { projExpCone(&(x[count + 3*i])); } count += 3*k->ed; /* project onto OTHER cones */ } double expNewtonOneD(double rho, double y_hat, double z_hat) { double t = fmax( -z_hat , 1e-6); double f, fp; for (int i=0; i<100; ++i){ f = t * (t + z_hat) / rho / rho - y_hat/rho + log(t/rho) + 1; fp = (2 * t + z_hat) / rho / rho + 1/t; t = t - f/fp; if (t <= -z_hat) { return 0; } else if (t <= 0) { return z_hat; } else if ( fabs(f) < 1e-9 ) { break; } } return t + z_hat; } void expSolveForXWithRho(double * v, double * x, double rho) { x[2] = expNewtonOneD(rho, v[1], v[2]); x[1] = (x[2] - v[2]) * x[2] / rho; x[0] = v[0] - rho; } double expCalcGrad(double * v, double * x, double rho) { expSolveForXWithRho(v, x, rho); if (x[1] <= 1e-12) { return x[0]; } else { return x[0] + x[1] * log( x[1] / x[2] ); } } void expGetRhoUb(double * v, double * x, double * ub, double * lb) { *lb = 0; *ub = 0.125; while(expCalcGrad(v, x, *ub) > 0) { *lb = *ub; (*ub) *= 2; } } // project onto the exponential cone, v has dimension *exactly* 3 void projExpCone(double * v) { double r = v[0], s = v[1], t = v[2]; // v in cl(Kexp) if( (s*exp(r/s) <= t && s > 0) || (r <= 0 && s == 0 && t >= 0) ) { return; } // -v in Kexp^* if ( (-r < 0 && r*exp(s/r) <= -exp(1)*t) || (-r == 0 && -s >= 0 && -t >= 0) ) { memset(v, 0, 3*sizeof(double)); return; } // special case with analytical solution if(r < 0 && s < 0) { v[1] = 0.0; v[2] = fmax(v[2],0); return; } double ub, lb, rho, g, x[3]; expGetRhoUb(v, x, &ub, &lb); for(int i = 0; i < 100; ++i){ rho = (ub + lb)/2; g = expCalcGrad(v, x, rho); if (g > 0) { lb = rho; } else{ ub = rho; } if (ub - lb < 1e-9) { break; } } v[0] = x[0]; v[1] = x[1]; v[2] = x[2]; } #ifdef LAPACK_LIB_FOUND void projectsdc(double *X, int n, Work * w) { /* project onto the positive semi-definite cone */ if (n == 1) { if(X[0] < 0.0) X[0] = 0.0; return; } int i, j, m=0; double * Xs = w->Xs; double * Z = w->Z; double * e = 
w->e; memcpy(Xs,X,n*n*sizeof(double)); // Xs = X + X', save div by 2 for eigen-recomp for (i = 0; i < n; ++i){ cblas_daxpy(n, 1, &(X[i]), n, &(Xs[i*n]), 1); //b_daxpy(n, 1, &(X[i]), n, &(Xs[i*n]), 1); } double EIG_TOL = 1e-8; double vupper = calcNorm(Xs,n*n); LAPACKE_dsyevr( LAPACK_COL_MAJOR, 'V', 'V', 'U', n, Xs, n, 0.0, vupper, -1, -1, EIG_TOL, &m, e, Z, n , NULL); //printf("m is %i, n is %i\n", m ,n); //printf("vupper is %f, max eig is %f\n",vupper, e[m>0 ? m-1:0]/2); memset(X, 0, n*n*sizeof(double)); for (i = 0; i < m; ++i) { cblas_dsyr(CblasColMajor, CblasLower, n, e[i]/2, &(Z[i*n]), 1, X, n); //b_dsyr('L', n, -e[i]/2, &(Z[i*n]), 1, Xs, n); } // fill in upper half for (i = 0; i < n; ++i){ for (j = i+1; j < n; ++j){ X[i + j*n] = X[j + i*n]; } } } #endif
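A small driver for the exponential-cone projection above; it assumes this file is compiled and linked in, and checks that a point already in cl(Kexp) is returned unchanged while an arbitrary point is pulled onto the cone:

#include <stdio.h>

void projExpCone(double * v);   /* defined in cones.c */

int main(void)
{
    /* (r,s,t) with s*exp(r/s) <= t and s > 0: already in the cone */
    double in_cone[3] = {0.0, 1.0, 2.0};
    projExpCone(in_cone);

    /* arbitrary point: projected onto the cone by bisection on rho */
    double outside[3] = {1.0, -1.0, -1.0};
    projExpCone(outside);

    printf("unchanged: %f %f %f\n", in_cone[0], in_cone[1], in_cone[2]);
    printf("projected: %f %f %f\n", outside[0], outside[1], outside[2]);
    return 0;
}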
pr79428-5.c
/* PR c/79428 */ /* { dg-options "-fopenmp" } */ #pragma omp ordered /* { dg-error "expected declaration specifiers before end of line" } */
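The test above checks that a stray `#pragma omp ordered` at file scope is diagnosed cleanly (PR c/79428) rather than accepted or crashing the compiler. For contrast, a well-formed use binds the directive to a loop region, as in this sketch:

/* compile with -fopenmp; the ordered region runs in loop-iteration order */
#include <stdio.h>

void f (int n)
{
  #pragma omp parallel for ordered
  for (int i = 0; i < n; i++)
    {
      #pragma omp ordered
      printf ("%d\n", i);
    }
}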
GB_unop__minv_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_uint32_uint32 // op(A') function: GB_unop_tran__minv_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CAST(z, aij) \ uint32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_uint32_uint32 ( uint32_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
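GB_IMINV_UNSIGNED (x, 32) computes the integer multiplicative inverse 1/x under GraphBLAS's saturating convention for division by zero. A sketch of what that works out to for uint32, assuming 1/0 saturates to UINT32_MAX (the convention GraphBLAS uses for unsigned division by zero; the internal macro is the reference):

#include <stdio.h>
#include <stdint.h>

/* integer minv for uint32: 1/0 saturates to UINT32_MAX, 1/1 = 1,
   and 1/x = 0 for any x > 1 (assumed convention) */
static uint32_t iminv_uint32 (uint32_t x)
{
    if (x == 0) return (UINT32_MAX) ;
    return (1u / x) ;
}

int main (void)
{
    printf ("%u\n", iminv_uint32 (0)) ;     /* 4294967295 */
    printf ("%u\n", iminv_uint32 (1)) ;     /* 1 */
    printf ("%u\n", iminv_uint32 (7)) ;     /* 0 */
    return (0) ;
}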
ast-dump-openmp-distribute-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:4:9, col:28> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:4:9) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:10:9, col:28> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // 
CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:10:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:17:9, col:40> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} 
<col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:17:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:24:9, col:40> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 
'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:24:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeSimdDirective {{.*}} <line:31:9, col:40> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:29, col:39> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:38> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:38> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:31:9) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
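The dump above is easiest to read with the openmp_structured_block marker in mind: without a collapse clause (and with collapse(1), see test_three) only the outermost loop is associated with the directive, so the inner ForStmt carries the marker; with collapse(2) both loops are folded into one iteration space and the marker moves to the statement inside the collapsed nest (the NullStmt in test_four, the third ForStmt in test_five). A minimal sketch of the same pattern outside the test harness (illustrative names only; in a real program `distribute simd` must be nested inside a `teams` region):

/* Sketch, not part of the test file above. */
void collapse_demo(int x, int y, float a[x][y]) {
  /* With collapse(2) both loops form a single iteration space, so the
   * assignment below is the directive's structured block -- the same
   * AST shape test_four checks for. */
  #pragma omp distribute simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int j = 0; j < y; j++)
      a[i][j] = 0.0f;
}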
95a93_so12.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "omp.h" #include <stdio.h> #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; double section2; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r73_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r131_vec, float **restrict r132_vec, const int time, const int tw); int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; 
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float(*r73)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void **)&r73, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); float(*r74)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void **)&r74, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); float(*r75)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void **)&r75, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); float(*r76)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void **)&r76, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); float(*r77)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void **)&r77, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); float **r131; posix_memalign((void **)&r131, 64, sizeof(float *) * nthreads); float **r132; posix_memalign((void **)&r132, 64, sizeof(float *) * nthreads); int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; int xb_size = block_sizes[0]; int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); posix_memalign((void **)&r131[tid], 64, sizeof(float[x0_blk0_size + 3 + 3][y0_blk0_size + 3 + 3][z_size + 3 + 3])); posix_memalign((void **)&r132[tid], 64, sizeof(float[x0_blk0_size + 3 + 3][y0_blk0_size + 3 + 3][z_size + 3 + 3])); } /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(static, 1) for (int x = x_m - 3; x <= x_M + 3; x += 1) { for (int y = y_m - 3; y <= y_M + 3; y += 1) { #pragma omp simd aligned(delta, phi, theta : 32) for (int z = z_m - 3; z <= z_M + 3; z += 1) { r73[x + 3][y + 3][z + 3] = sqrt(2 * delta[x + 12][y + 12][z + 12] + 1); r74[x + 3][y + 3][z + 3] = cos(theta[x + 12][y + 12][z + 12]); r75[x + 3][y + 3][z + 3] = sin(phi[x + 12][y + 12][z + 12]); r76[x + 3][y + 3][z + 3] = sin(theta[x + 12][y + 12][z + 12]); r77[x + 3][y + 3][z + 3] = cos(phi[x + 12][y + 12][z + 12]); } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size) { //printf(" Change of outer xblock %d \n", 
xb); for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size) { for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ bf0(damp_vec, dt, epsilon_vec, (float *)r73, (float *)r74, (float *)r75, (float *)r76, (float *)r77, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_m, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r131, (float **)r132, time, tw); // x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m, /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000; } } } } #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); free(r131[tid]); free(r132[tid]); } free(r73); free(r74); free(r75); free(r76); free(r77); free(r131); free(r132); return 0; } void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r73_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r131_vec, float **restrict r132_vec, const int time, const int tw) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data; float(*restrict r73)[y_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y_size + 3 + 3][z_size + 3 + 3]) r73_vec; float(*restrict r74)[y_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y_size + 3 + 3][z_size + 3 + 3]) r74_vec; float(*restrict r75)[y_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y_size + 3 + 3][z_size + 3 + 3]) r75_vec; float(*restrict r76)[y_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y_size + 3 + 3][z_size + 3 + 3]) r76_vec; float(*restrict r77)[y_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y_size + 3 + 3][z_size + 3 + 3]) r77_vec; float(*restrict 
u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; float **r131 = (float **)r131_vec; float **r132 = (float **)r132_vec; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); float(*restrict r118)[y0_blk0_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 3 + 3][z_size + 3 + 3]) r131[tid]; float(*restrict r119)[y0_blk0_size + 3 + 3][z_size + 3 + 3] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 3 + 3][z_size + 3 + 3]) r132[tid]; #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0 - 3, xs = 0; x <= min(min((x_M + time), (xb + xb_size + 2)), (x0_blk0 + x0_blk0_size + 2)); x++, xs++) { for (int y = y0_blk0 - 3, ys = 0; y <= min(min((y_M + time), (yb + yb_size + 2)), (y0_blk0 + y0_blk0_size + 2)); y++, ys++) { #pragma omp simd aligned(u, v : 64) for (int z = z_m - 3; z <= z_M + 3; z += 1) { r118[xs][ys][z + 3] = -(1.66666669e-3F * (-u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) + 1.50000002e-2F * (u[t0][x - time + 10][y - time + 12][z + 12] - u[t0][x - time + 14][y - time + 12][z + 12]) + 7.50000011e-2F * (-u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12])) * r76[x - time + 3][y - time + 3][z + 3] * r77[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F * (-u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 15][z + 12]) + 1.50000002e-2F * (u[t0][x - time + 12][y - time + 10][z + 12] - u[t0][x - time + 12][y - time + 14][z + 12]) + 7.50000011e-2F * (-u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 13][z + 12])) * r75[x - time + 3][y - time + 3][z + 3] * r76[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F * (-u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15]) + 1.50000002e-2F 
* (u[t0][x - time + 12][y - time + 12][z + 10] - u[t0][x - time + 12][y - time + 12][z + 14]) + 7.50000011e-2F * (-u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13])) * r74[x - time + 3][y - time + 3][z + 3]; r119[xs][ys][z + 3] = -(1.66666669e-3F * (-v[t0][x - time + 9][y - time + 12][z + 12] + v[t0][x - time + 15][y - time + 12][z + 12]) + 1.50000002e-2F * (v[t0][x - time + 10][y - time + 12][z + 12] - v[t0][x - time + 14][y - time + 12][z + 12]) + 7.50000011e-2F * (-v[t0][x - time + 11][y - time + 12][z + 12] + v[t0][x - time + 13][y - time + 12][z + 12])) * r76[x - time + 3][y - time + 3][z + 3] * r77[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F * (-v[t0][x - time + 12][y - time + 9][z + 12] + v[t0][x - time + 12][y - time + 15][z + 12]) + 1.50000002e-2F * (v[t0][x - time + 12][y - time + 10][z + 12] - v[t0][x - time + 12][y - time + 14][z + 12]) + 7.50000011e-2F * (-v[t0][x - time + 12][y - time + 11][z + 12] + v[t0][x - time + 12][y - time + 13][z + 12])) * r75[x - time + 3][y - time + 3][z + 3] * r76[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F * (-v[t0][x - time + 12][y - time + 12][z + 9] + v[t0][x - time + 12][y - time + 12][z + 15]) + 1.50000002e-2F * (v[t0][x - time + 12][y - time + 12][z + 10] - v[t0][x - time + 12][y - time + 12][z + 14]) + 7.50000011e-2F * (-v[t0][x - time + 12][y - time + 12][z + 11] + v[t0][x - time + 12][y - time + 12][z + 13])) * r74[x - time + 3][y - time + 3][z + 3]; } } } for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++) { for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++) { #pragma omp simd aligned(damp, epsilon, u, v, vp : 64) for (int z = z_m; z <= z_M; z += 1) { float r130 = 1.0 / dt; float r129 = 1.0 / (dt * dt); float r128 = 1.50000002e-2F * (-r119[xs + 1][ys + 3][z + 3] * r76[x - time + 1][y - time + 3][z + 3] * r77[x - time + 1][y - time + 3][z + 3] - r119[xs + 3][ys + 1][z + 3] * r75[x - time + 3][y - time + 1][z + 3] * r76[x - time + 3][y - time + 1][z + 3] - r119[xs + 3][ys + 3][z + 1] * r74[x - time + 3][y - time + 3][z + 1] + r119[xs + 3][ys + 3][z + 5] * r74[x - time + 3][y - time + 3][z + 5] + r119[xs + 3][ys + 5][z + 3] * r75[x - time + 3][y - time + 5][z + 3] * r76[x - time + 3][y - time + 5][z + 3] + r119[xs + 5][ys + 3][z + 3] * r76[x - time + 5][y - time + 3][z + 3] * r77[x - time + 5][y - time + 3][z + 3]); float r127 = 1.66666669e-3F * (r119[xs][ys + 3][z + 3] * r76[x - time][y - time + 3][z + 3] * r77[x - time][y - time + 3][z + 3] + r119[xs + 3][ys][z + 3] * r75[x - time + 3][y - time][z + 3] * r76[x - time + 3][y - time][z + 3] + r119[xs + 3][ys + 3][z] * r74[x - time + 3][y - time + 3][z] - r119[xs + 3][ys + 3][z + 6] * r74[x - time + 3][y - time + 3][z + 6] - r119[xs + 3][ys + 6][z + 3] * r75[x - time + 3][y - time + 6][z + 3] * r76[x - time + 3][y - time + 6][z + 3] - r119[xs + 6][ys + 3][z + 3] * r76[x - time + 6][y - time + 3][z + 3] * r77[x - time + 6][y - time + 3][z + 3]); float r126 = 7.50000011e-2F * (r119[xs + 2][ys + 3][z + 3] * r76[x - time + 2][y - time + 3][z + 3] * r77[x - time + 2][y - time + 3][z + 3] + r119[xs + 3][ys + 2][z + 3] * r75[x - time + 3][y - time + 2][z + 3] * r76[x - time + 3][y - time + 2][z + 3] + r119[xs + 3][ys + 3][z + 2] * r74[x - time + 3][y - time + 3][z + 2] - r119[xs + 3][ys + 3][z + 4] * r74[x - time + 3][y - time + 3][z + 4] - r119[xs + 3][ys + 4][z + 3] * r75[x - time + 3][y - time + 4][z + 
3] * r76[x - time + 3][y - time + 4][z + 3] - r119[xs + 4][ys + 3][z + 3] * r76[x - time + 4][y - time + 3][z + 3] * r77[x - time + 4][y - time + 3][z + 3]); float r125 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]); float r124 = 1.0 / (r125 * r129 + r130 * damp[x - time + 1][y - time + 1][z + 1]); float r123 = 1.66666669e-3F * (-r118[xs][ys + 3][z + 3] * r76[x - time][y - time + 3][z + 3] * r77[x - time][y - time + 3][z + 3] - r118[xs + 3][ys][z + 3] * r75[x - time + 3][y - time][z + 3] * r76[x - time + 3][y - time][z + 3] - r118[xs + 3][ys + 3][z] * r74[x - time + 3][y - time + 3][z] + r118[xs + 3][ys + 3][z + 6] * r74[x - time + 3][y - time + 3][z + 6] + r118[xs + 3][ys + 6][z + 3] * r75[x - time + 3][y - time + 6][z + 3] * r76[x - time + 3][y - time + 6][z + 3] + r118[xs + 6][ys + 3][z + 3] * r76[x - time + 6][y - time + 3][z + 3] * r77[x - time + 6][y - time + 3][z + 3]) + 1.50000002e-2F * (r118[xs + 1][ys + 3][z + 3] * r76[x - time + 1][y - time + 3][z + 3] * r77[x - time + 1][y - time + 3][z + 3] + r118[xs + 3][ys + 1][z + 3] * r75[x - time + 3][y - time + 1][z + 3] * r76[x - time + 3][y - time + 1][z + 3] + r118[xs + 3][ys + 3][z + 1] * r74[x - time + 3][y - time + 3][z + 1] - r118[xs + 3][ys + 3][z + 5] * r74[x - time + 3][y - time + 3][z + 5] - r118[xs + 3][ys + 5][z + 3] * r75[x - time + 3][y - time + 5][z + 3] * r76[x - time + 3][y - time + 5][z + 3] - r118[xs + 5][ys + 3][z + 3] * r76[x - time + 5][y - time + 3][z + 3] * r77[x - time + 5][y - time + 3][z + 3]) + 7.50000011e-2F * (-r118[xs + 2][ys + 3][z + 3] * r76[x - time + 2][y - time + 3][z + 3] * r77[x - time + 2][y - time + 3][z + 3] - r118[xs + 3][ys + 2][z + 3] * r75[x - time + 3][y - time + 2][z + 3] * r76[x - time + 3][y - time + 2][z + 3] - r118[xs + 3][ys + 3][z + 2] * r74[x - time + 3][y - time + 3][z + 2] + r118[xs + 3][ys + 3][z + 4] * r74[x - time + 3][y - time + 3][z + 4] + r118[xs + 3][ys + 4][z + 3] * r75[x - time + 3][y - time + 4][z + 3] * r76[x - time + 3][y - time + 4][z + 3] + r118[xs + 4][ys + 3][z + 3] * r76[x - time + 4][y - time + 3][z + 3] * r77[x - time + 4][y - time + 3][z + 3]) - 6.01250588e-7F * (u[t0][x - time + 6][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 6][z + 12] + u[t0][x - time + 12][y - time + 12][z + 6] + u[t0][x - time + 12][y - time + 12][z + 18] + u[t0][x - time + 12][y - time + 18][z + 12] + u[t0][x - time + 18][y - time + 12][z + 12]) + 1.03896102e-5F * (u[t0][x - time + 7][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 7][z + 12] + u[t0][x - time + 12][y - time + 12][z + 7] + u[t0][x - time + 12][y - time + 12][z + 17] + u[t0][x - time + 12][y - time + 17][z + 12] + u[t0][x - time + 17][y - time + 12][z + 12]) - 8.92857123e-5F * (u[t0][x - time + 8][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 8][z + 12] + u[t0][x - time + 12][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 12][z + 16] + u[t0][x - time + 12][y - time + 16][z + 12] + u[t0][x - time + 16][y - time + 12][z + 12]) + 5.29100517e-4F * (u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15] + u[t0][x - time + 12][y - time + 15][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) - 2.67857137e-3F * (u[t0][x - time + 10][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 10][z + 12] + u[t0][x - time + 12][y - time + 12][z + 10] + u[t0][x - time + 12][y - time + 12][z + 14] + u[t0][x - time + 12][y - 
time + 14][z + 12] + u[t0][x - time + 14][y - time + 12][z + 12]) + 1.71428568e-2F * (u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13] + u[t0][x - time + 12][y - time + 13][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12]) - 8.94833313e-2F * u[t0][x - time + 12][y - time + 12][z + 12]; float r116 = r129 * (-2.0F * u[t0][x - time + 12][y - time + 12][z + 12] + u[t1][x - time + 12][y - time + 12][z + 12]); float r117 = r129 * (-2.0F * v[t0][x - time + 12][y - time + 12][z + 12] + v[t1][x - time + 12][y - time + 12][z + 12]); u[t2][x - time + 12][y - time + 12][z + 12] = r124 * ((-r116) * r125 + r123 * (2 * epsilon[x - time + 12][y - time + 12][z + 12] + 1) + r130 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 12][y - time + 12][z + 12]) + (r126 + r127 + r128) * r73[x - time + 3][y - time + 3][z + 3]); v[t2][x - time + 12][y - time + 12][z + 12] = r124 * ((-r117) * r125 + r123 * r73[x - time + 3][y - time + 3][z + 3] + r126 + r127 + r128 + r130 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 12][y - time + 12][z + 12])); } int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1; for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; u[t2][x - time + 12][y - time + 12][zind + 12] += r22; float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; v[t2][x - time + 12][y - time + 12][zind + 12] += r23; //printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23); } } } } } } }
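One detail worth isolating from the driver above: t0, t1 and t2 implement a three-slot circular time buffer, so only three time levels of u and v are ever resident. The sketch below shows just that index rotation (an assumption-laden simplification: the real driver additionally skews `time` by the tile offset and derives the logical step as `(time / sf) % (time_M - time_m + 1)`):

#include <stdio.h>

int main(void) {
  int time_m = 0, time_M = 9;
  for (int time = time_m; time <= time_M; time++) {
    int t0 = time % 3;       /* current level (read) */
    int t1 = (time + 2) % 3; /* previous level, i.e. (time - 1) mod 3 (read) */
    int t2 = (time + 1) % 3; /* next level (written) */
    printf("time=%d reads slots %d,%d writes slot %d\n", time, t0, t1, t2);
  }
  return 0;
}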
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(24*t3+Nx+20,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ 
(-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
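For reference, the computation the skewed and tiled CLooG loops perform is the plain 7-point update below (a sketch, not part of the benchmark): two grids are ping-ponged through A[t % 2] and A[(t + 1) % 2], and each interior point combines its own value and its six face neighbors with per-point coefficients.

/* Untiled equivalent of the generated CLooG code above (interior only). */
void stencil_reference(int Nt, int Nz, int Ny, int Nx,
                       double ****A, double ****coef) {
  for (int t = 0; t < Nt - 1; t++)
    for (int i = 1; i < Nz - 1; i++)
      for (int j = 1; j < Ny - 1; j++)
        for (int k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              coef[0][i][j][k] * A[t % 2][i][j][k]
            + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
            + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
            + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
            + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
            + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
            + coef[6][i][j][k] * A[t % 2][i][j][k + 1];
}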
GB_unop__identity_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint64_bool
// op(A') function: GB_unop_tran__identity_uint64_bool

// C type:   uint64_t
// A type:   bool
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    bool aij = Ax [pA] ;            \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint64_bool
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;
        uint64_t z = (uint64_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
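A hypothetical standalone harness for the apply kernel's semantics (sketch only: array contents invented for illustration; inside GraphBLAS this kernel is invoked internally, not by user code). It mirrors the loop body, Cx [p] = (uint64_t) Ax [p], without the GraphBLAS headers:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void) {
    bool Ax[4] = { true, false, true, true };
    uint64_t Cx[4];
    for (int64_t p = 0; p < 4; p++)
        Cx[p] = (uint64_t) Ax[p];               /* identity with typecast */
    for (int p = 0; p < 4; p++)
        printf("%llu ", (unsigned long long) Cx[p]);  /* prints: 1 0 1 1 */
    printf("\n");
    return 0;
}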
kmp_atomic_cas.c
// RUN: %libomp-compile-and-run

#include <stdio.h>
#include <stdbool.h>
#include <omp.h>

#ifdef __cplusplus
extern "C" {
#endif
typedef void* ident_t;
extern bool __kmpc_atomic_bool_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern bool __kmpc_atomic_bool_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern bool __kmpc_atomic_bool_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern bool __kmpc_atomic_bool_8_cas(ident_t *loc, int gtid, long long *x, long long e, long long d);
extern char __kmpc_atomic_val_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern short __kmpc_atomic_val_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern int __kmpc_atomic_val_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern long long __kmpc_atomic_val_8_cas(ident_t *loc, int gtid, long long *x, long long e, long long d);
#ifdef __cplusplus
}
#endif

int main() {
  int ret = 0;
  bool r;
  char c0 = 1; char c1 = 2; char c2 = 3; char co = 2; char cc = 0;
  short s0 = 11; short s1 = 12; short s2 = 13; short so = 12; short sc = 0;
  int i0 = 211; int i1 = 212; int i2 = 213; int io = 212; int ic = 0;
  long long l0 = 3111; long long l1 = 3112; long long l2 = 3113; long long lo = 3112; long long lc = 0;

  // initialize OpenMP runtime library
  omp_set_dynamic(0);

  // #pragma omp atomic compare update capture
  // { r = x == e; if(r) { x = d; } }
  // char, co == c1 initially, co == c2 finally
  r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c0, c2); // no-op
  if (co != c1) { ret++; printf("Error bool_1_cas no-op: %d != %d\n", co, c1); }
  if (r) { ret++; printf("Error bool_1_cas no-op ret: %d\n", r); }
  r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c1, c2);
  if (co != c2) { ret++; printf("Error bool_1_cas: %d != %d\n", co, c2); }
  if (!r) { ret++; printf("Error bool_1_cas ret: %d\n", r); }
  // short
  r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s0, s2); // no-op
  if (so != s1) { ret++; printf("Error bool_2_cas no-op: %d != %d\n", so, s1); }
  if (r) { ret++; printf("Error bool_2_cas no-op ret: %d\n", r); }
  r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s1, s2);
  if (so != s2) { ret++; printf("Error bool_2_cas: %d != %d\n", so, s2); }
  if (!r) { ret++; printf("Error bool_2_cas ret: %d\n", r); }
  // int
  r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i0, i2); // no-op
  if (io != i1) { ret++; printf("Error bool_4_cas no-op: %d != %d\n", io, i1); }
  if (r) { ret++; printf("Error bool_4_cas no-op ret: %d\n", r); }
  r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i1, i2);
  if (io != i2) { ret++; printf("Error bool_4_cas: %d != %d\n", io, i2); }
  if (!r) { ret++; printf("Error bool_4_cas ret: %d\n", r); }
  // long long
  r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l0, l2); // no-op
  if (lo != l1) { ret++; printf("Error bool_8_cas no-op: %lld != %lld\n", lo, l1); }
  if (r) { ret++; printf("Error bool_8_cas no-op ret: %d\n", r); }
  r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l1, l2);
  if (lo != l2) { ret++; printf("Error bool_8_cas: %lld != %lld\n", lo, l2); }
  if (!r) { ret++; printf("Error bool_8_cas ret: %d\n", r); }

  // #pragma omp atomic compare update capture
  // { v = x; if (x == e) { x = d; } }
  // char, co == c2 initially, co == c1 finally
  cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c0, c1); // no-op
  if (co != c2) { ret++; printf("Error val_1_cas no-op: %d != %d\n", co, c2); }
  if (cc != c2) { ret++; printf("Error val_1_cas no-op ret: %d != %d\n", cc, c2); }
  cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c2, c1);
  if (co != c1) { ret++; printf("Error val_1_cas: %d != %d\n", co, c1); }
  if (cc != c2) { ret++; printf("Error val_1_cas ret: %d != %d\n", cc, c2); }
  // short
  sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s0, s1); // no-op
  if (so != s2) { ret++; printf("Error val_2_cas no-op: %d != %d\n", so, s2); }
  if (sc != s2) { ret++; printf("Error val_2_cas no-op ret: %d != %d\n", sc, s2); }
  sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s2, s1);
  if (so != s1) { ret++; printf("Error val_2_cas: %d != %d\n", so, s1); }
  if (sc != s2) { ret++; printf("Error val_2_cas ret: %d != %d\n", sc, s2); }
  // int
  ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i0, i1); // no-op
  if (io != i2) { ret++; printf("Error val_4_cas no-op: %d != %d\n", io, i2); }
  if (ic != i2) { ret++; printf("Error val_4_cas no-op ret: %d != %d\n", ic, i2); }
  ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i2, i1);
  if (io != i1) { ret++; printf("Error val_4_cas: %d != %d\n", io, i1); }
  if (ic != i2) { ret++; printf("Error val_4_cas ret: %d != %d\n", ic, i2); }
  // long long
  lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l0, l1); // no-op
  if (lo != l2) { ret++; printf("Error val_8_cas no-op: %lld != %lld\n", lo, l2); }
  if (lc != l2) { ret++; printf("Error val_8_cas no-op ret: %lld != %lld\n", lc, l2); }
  lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l2, l1);
  if (lo != l1) { ret++; printf("Error val_8_cas: %lld != %lld\n", lo, l1); }
  if (lc != l2) { ret++; printf("Error val_8_cas ret: %lld != %lld\n", lc, l2); }

  // check in parallel
  i0 = 1;
  i1 = 0;
  for (io = 0; io < 5; ++io) {
    #pragma omp parallel num_threads(2) private(i2, ic, r)
    {
      if (omp_get_thread_num() == 0) {
        // th0 waits for th1 to increment i1, then th0 increments i0
        #pragma omp atomic read
        i2 = i1;
        ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
        while(ic != i2) {
          #pragma omp atomic read
          i2 = i1;
          ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
        }
      } else {
        // th1 increments i1 if it is equal to i0 - 1, letting th0 proceed
        r = 0;
        while(!r) {
          #pragma omp atomic read
          i2 = i0;
          r = __kmpc_atomic_bool_4_cas(NULL, 0, &i1, i2 - 1, i2);
        }
      }
    }
  }
  if (i0 != 6 || i1 != 5) {
    ret++;
    printf("Error in parallel, %d != %d or %d != %d\n", i0, 6, i1, 5);
  }

  if (ret == 0)
    printf("passed\n");
  return ret;
}
sparse_matrix_multiplication_utility.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED )
#define KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED

// System includes
#include <vector>
#include <math.h>
#include <algorithm>
#include <numeric>
#ifdef _OPENMP
#include <omp.h>
#endif

// External includes
#include "amgcl/value_type/interface.hpp"

// Project includes
#include "includes/define.h"

namespace Kratos
{
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class SparseMatrixMultiplicationUtility
 * @ingroup ContactStructuralMechanicsApplication
 * @brief A utility to multiply sparse matrices in Ublas
 * @details Taken and adapted for ublas from external_libraries/amgcl/detail/spgemm.hpp by Denis Demidov <dennis.demidov@gmail.com>
 * @todo Remove as soon as we no longer depend on Ublas...
 * @author Vicente Mataix Ferrandiz
 */
class SparseMatrixMultiplicationUtility
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of TreeContactSearch
    KRATOS_CLASS_POINTER_DEFINITION( SparseMatrixMultiplicationUtility );

    /// The size type
    typedef std::size_t SizeType;

    /// The index type
    typedef std::size_t IndexType;

    /// The signed index type
    typedef std::ptrdiff_t SignedIndexType;

    /// A vector of indexes
    typedef DenseVector<IndexType> IndexVectorType;

    /// A vector of indexes (signed)
    typedef DenseVector<SignedIndexType> SignedIndexVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor
    SparseMatrixMultiplicationUtility(){};

    /// Destructor
    virtual ~SparseMatrixMultiplicationUtility()= default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Metafunction that returns value type of a matrix or a vector type.
    template <class T, class Enable = void>
    struct value_type {
        typedef typename T::value_type type;
    };

    /**
     * @brief Matrix-matrix product C = A·B
     * @details This method uses a template for each matrix
     * @param rA The first matrix
     * @param rB The second matrix
     * @param rC The resulting matrix
     */
    template <class AMatrix, class BMatrix, class CMatrix>
    static void MatrixMultiplication(
        const AMatrix& rA,
        const BMatrix& rB,
        CMatrix& rC
        )
    {
    #ifdef _OPENMP
        const int nt = omp_get_max_threads();
    #else
        const int nt = 1;
    #endif

        if (nt > 16) {
            MatrixMultiplicationRMerge(rA, rB, rC);
        } else {
            MatrixMultiplicationSaad(rA, rB, rC);
        }
    }

    /**
     * @brief The first is an OpenMP-enabled modification of the classic algorithm from Saad
     * @details It is used whenever number of OpenMP cores is 4 or less. Saad, Yousef. Iterative methods for sparse linear systems. Siam, 2003.
* @param A The first matrix to multiply * @param B The second matrix to multiply * @param C The resulting matrix */ template <class AMatrix, class BMatrix, class CMatrix> static void MatrixMultiplicationSaad( const AMatrix& A, const BMatrix& B, CMatrix& C ) { typedef typename value_type<CMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = B.size2(); // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); // Get access to A, B and C data const IndexType* index1_a = A.index1_data().begin(); const IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType* c_ptr = new IndexType[nrows + 1]; c_ptr[0] = 0; #pragma omp parallel { SignedIndexVectorType marker(ncols); for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill) marker[i_fill] = -1; #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; IndexType C_cols = 0; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const IndexType row_begin_b = index1_b[ca]; const IndexType row_end_b = index1_b[ca+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; if (marker[cb] != ia) { marker[cb] = ia; ++C_cols; } } } c_ptr[ia + 1] = C_cols; } } // We initialize the sparse matrix std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr); const SizeType nonzero_values = c_ptr[nrows]; IndexType* aux_index2_c = new IndexType[nonzero_values]; ValueType* aux_val_c = new ValueType[nonzero_values]; #pragma omp parallel { SignedIndexVectorType marker(ncols); for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill) marker[i_fill] = -1; #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; const IndexType row_beg = c_ptr[ia]; IndexType row_end = row_beg; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const ValueType va = values_a[ja]; const IndexType row_begin_b = index1_b[ca]; const IndexType row_end_b = index1_b[ca+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; const ValueType vb = values_b[jb]; if (marker[cb] < static_cast<SignedIndexType>(row_beg)) { marker[cb] = row_end; aux_index2_c[row_end] = cb; aux_val_c[row_end] = va * vb; ++row_end; } else { aux_val_c[marker[cb]] += va * vb; } } } } } // We reorder the rows SortRows(c_ptr, nrows, ncols, aux_index2_c, aux_val_c); // We fill the matrix CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c); // Release memory delete[] c_ptr; delete[] aux_index2_c; delete[] aux_val_c; } /** * @brief Row-merge algorithm from Rupp et al. * @details The algorithm requires less memory and shows much better scalability than classic one. It is used when number of OpenMP cores is more than 4. 
    /**
     * @brief Row-merge algorithm from Rupp et al.
     * @details The algorithm requires less memory and shows much better scalability than the classic one. It is used when the number of OpenMP threads is more than 16
     * @param A The first matrix to multiply
     * @param B The second matrix to multiply
     * @param C The resulting matrix
     */
    template <class AMatrix, class BMatrix, class CMatrix>
    static void MatrixMultiplicationRMerge(
        const AMatrix &A,
        const BMatrix &B,
        CMatrix &C
        )
    {
        typedef typename value_type<CMatrix>::type ValueType;

        // Auxiliary sizes
        const SizeType nrows = A.size1();
        const SizeType ncols = B.size2();

        // Exiting just in case of empty matrix
        if ((nrows == 0) || (ncols == 0))
            return;

        // Get access to A and B data
        const IndexType* index1_a = A.index1_data().begin();
        const IndexType* index2_a = A.index2_data().begin();
        const double* values_a = A.value_data().begin();
        const IndexType* index1_b = B.index1_data().begin();
        const IndexType* index2_b = B.index2_data().begin();
        const double* values_b = B.value_data().begin();

        // The maximum width of a row of C before compression, used to size the per-thread buffers
        IndexType max_row_width = 0;

        #pragma omp parallel
        {
            IndexType my_max = 0;

            #pragma omp for
            for(int i = 0; i < static_cast<int>(nrows); ++i) {
                const IndexType row_beg = index1_a[i];
                const IndexType row_end = index1_a[i+1];

                IndexType row_width = 0;
                for(IndexType j = row_beg; j < row_end; ++j) {
                    const IndexType a_col = index2_a[j];
                    row_width += index1_b[a_col + 1] - index1_b[a_col];
                }
                my_max = std::max(my_max, row_width);
            }

            #pragma omp critical
            max_row_width = std::max(max_row_width, my_max);
        }

    #ifdef _OPENMP
        const int nthreads = omp_get_max_threads();
    #else
        const int nthreads = 1;
    #endif

        std::vector< std::vector<IndexType> > tmp_col(nthreads);
        std::vector< std::vector<ValueType> > tmp_val(nthreads);

        for(int i = 0; i < nthreads; ++i) {
            tmp_col[i].resize(3 * max_row_width);
            tmp_val[i].resize(2 * max_row_width);
        }

        // We create the c_ptr auxiliary variable
        IndexType* c_ptr = new IndexType[nrows + 1];
        c_ptr[0] = 0;

        #pragma omp parallel
        {
        #ifdef _OPENMP
            const int tid = omp_get_thread_num();
        #else
            const int tid = 0;
        #endif

            IndexType* t_col = &tmp_col[tid][0];

            #pragma omp for
            for(int i = 0; i < static_cast<int>(nrows); ++i) {
                const IndexType row_beg = index1_a[i];
                const IndexType row_end = index1_a[i+1];

                c_ptr[i+1] = ProdRowWidth(
                    index2_a + row_beg, index2_a + row_end, index1_b, index2_b,
                    t_col, t_col + max_row_width, t_col + 2 * max_row_width );
            }
        }

        // We initialize the sparse matrix
        std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
        const SizeType nonzero_values = c_ptr[nrows];
        IndexType* aux_index2_c = new IndexType[nonzero_values];
        ValueType* aux_val_c = new ValueType[nonzero_values];

        #pragma omp parallel
        {
        #ifdef _OPENMP
            const int tid = omp_get_thread_num();
        #else
            const int tid = 0;
        #endif

            IndexType* t_col = tmp_col[tid].data();
            ValueType *t_val = tmp_val[tid].data();

            #pragma omp for
            for(int i = 0; i < static_cast<int>(nrows); ++i) {
                const IndexType row_beg = index1_a[i];
                const IndexType row_end = index1_a[i+1];

                ProdRow(index2_a + row_beg, index2_a + row_end, values_a + row_beg,
                        index1_b, index2_b, values_b,
                        aux_index2_c + c_ptr[i], aux_val_c + c_ptr[i],
                        t_col, t_val, t_col + max_row_width, t_val + max_row_width );
            }
        }

        // We fill the matrix
        CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);

        // Release memory
        delete[] c_ptr;
        delete[] aux_index2_c;
        delete[] aux_val_c;
    }
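    // Illustrative sketch (not part of the original header): the row-merge
    // variant can be called directly as well, which is handy for checking the
    // two algorithms against each other. Matrix names are assumptions:
    //
    //     boost::numeric::ublas::compressed_matrix<double> C_saad, C_rmerge;
    //     SparseMatrixMultiplicationUtility::MatrixMultiplicationSaad(A, B, C_saad);
    //     SparseMatrixMultiplicationUtility::MatrixMultiplicationRMerge(A, B, C_rmerge);
    //     // Both hold A·B. Note the buffer sizing above: each thread keeps
    //     // three index scratch rows (3*max_row_width) and two value scratch
    //     // rows (2*max_row_width) for the pairwise merges.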
    /**
     * @brief This method sums two sparse matrices in an efficient way, computing A = A + Factor·B
     * @param A The resulting matrix
     * @param B The second matrix to sum
     * @param Factor The scalar factor applied to B
     */
    template <class AMatrix, class BMatrix>
    static void MatrixAdd(
        AMatrix& A,
        const BMatrix& B,
        const double Factor = 1.0
        )
    {
        typedef typename value_type<AMatrix>::type ValueType;

        // Auxiliary sizes
        const SizeType nrows = A.size1();
        const SizeType ncols = A.size2();

        /* Some checks */
        // Exiting just in case of empty matrix
        if ((nrows == 0) || (ncols == 0))
            return;

        KRATOS_ERROR_IF_NOT(nrows == B.size1()) << "The second matrix has a wrong number of rows" << std::endl;
        KRATOS_ERROR_IF_NOT(ncols == B.size2()) << "The second matrix has a wrong number of columns" << std::endl;

        // Get access to A and B data
        const IndexType* index1_a = A.index1_data().begin();
        const IndexType* index2_a = A.index2_data().begin();
        const double* values_a = A.value_data().begin();
        const IndexType* index1_b = B.index1_data().begin();
        const IndexType* index2_b = B.index2_data().begin();
        const double* values_b = B.value_data().begin();

        IndexType* new_a_ptr = new IndexType[nrows + 1];
        new_a_ptr[0] = 0;

        #pragma omp parallel
        {
            #pragma omp for
            for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
                // The marker is re-created for every row, since it stores row-local information
                SignedIndexVectorType marker(ncols);
                for (int i = 0; i < static_cast<int>(ncols); ++i)
                    marker[i] = -1;

                // Initialize
                IndexType new_A_cols = 0;

                // Iterate over A
                const IndexType row_begin_a = index1_a[ia];
                const IndexType row_end_a   = index1_a[ia+1];
                for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
                    const IndexType ca = index2_a[ja];
                    marker[ca] = 1;
                    ++new_A_cols;
                }

                // Iterate over B
                const IndexType row_begin_b = index1_b[ia];
                const IndexType row_end_b   = index1_b[ia+1];
                for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
                    const IndexType cb = index2_b[jb];
                    if (marker[cb] < 0) {
                        marker[cb] = 1;
                        ++new_A_cols;
                    }
                }
                new_a_ptr[ia + 1] = new_A_cols;
            }
        }

        // We initialize the sparse matrix
        std::partial_sum(new_a_ptr, new_a_ptr + nrows + 1, new_a_ptr);
        const SizeType nonzero_values = new_a_ptr[nrows];
        IndexType* aux_index2_new_a = new IndexType[nonzero_values];
        ValueType* aux_val_new_a = new ValueType[nonzero_values];

        #pragma omp parallel
        {
            #pragma omp for
            for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
                SignedIndexVectorType marker(ncols);
                for (int i = 0; i < static_cast<int>(ncols); ++i)
                    marker[i] = -1;

                // Initialize
                const IndexType row_beg = new_a_ptr[ia];
                IndexType row_end = row_beg;

                // Iterate over A
                const IndexType row_begin_a = index1_a[ia];
                const IndexType row_end_a   = index1_a[ia+1];
                for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
                    const IndexType ca = index2_a[ja];
                    const ValueType va = values_a[ja];

                    marker[ca] = row_end;
                    aux_index2_new_a[row_end] = ca;
                    aux_val_new_a[row_end] = va;
                    ++row_end;
                }

                // Iterate over B
                const IndexType row_begin_b = index1_b[ia];
                const IndexType row_end_b   = index1_b[ia+1];
                for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
                    const IndexType cb = index2_b[jb];
                    const ValueType vb = values_b[jb];

                    if (marker[cb] < 0) {
                        marker[cb] = row_end;
                        aux_index2_new_a[row_end] = cb;
                        aux_val_new_a[row_end] = Factor * vb;
                        ++row_end;
                    } else {
                        aux_val_new_a[marker[cb]] += Factor * vb;
                    }
                }
            }
        }

        // We reorder the rows
        SortRows(new_a_ptr, nrows, ncols, aux_index2_new_a, aux_val_new_a);

        // We fill the matrix
        CreateSolutionMatrix(A, nrows, ncols, new_a_ptr, aux_index2_new_a, aux_val_new_a);

        // Release memory
        delete[] new_a_ptr;
        delete[] aux_index2_new_a;
        delete[] aux_val_new_a;
    }
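    // Illustrative sketch (not part of the original header): computing
    // A := A - B via the Factor argument. Matrix contents are assumptions:
    //
    //     boost::numeric::ublas::compressed_matrix<double> A(2, 2, 2), B(2, 2, 2);
    //     A(0, 0) = 3.0; A(1, 1) = 3.0;
    //     B(0, 0) = 1.0; B(0, 1) = 2.0;
    //     SparseMatrixMultiplicationUtility::MatrixAdd(A, B, -1.0);
    //     // A now holds A - B: A(0,0) = 2.0, A(0,1) = -2.0, A(1,1) = 3.0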
    /**
     * @brief This method computes the transpose of a given matrix
     * @param rA The resulting (transposed) matrix
     * @param rB The matrix to be transposed
     * @param Factor The scalar factor applied to the entries of the transpose
     */
    template <class AMatrix, class BMatrix>
    static void TransposeMatrix(
        AMatrix& rA,
        const BMatrix& rB,
        const double Factor = 1.0
        )
    {
        typedef typename value_type<AMatrix>::type ValueType;

        // Get access to B data
        const IndexType* index1 = rB.index1_data().begin();
        const IndexType* index2 = rB.index2_data().begin();
        const ValueType* data = rB.value_data().begin();
        const SizeType transpose_nonzero_values = rB.value_data().end() - rB.value_data().begin();

        const SizeType size_system_1 = rB.size1();
        const SizeType size_system_2 = rB.size2();

        if (rA.size1() != size_system_2 || rA.size2() != size_system_1 ) {
            rA.resize(size_system_2, size_system_1, false);
        }

        IndexVectorType new_a_ptr(size_system_2 + 1);
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(size_system_2 + 1); ++i)
            new_a_ptr[i] = 0;
        IndexVectorType aux_index2_new_a(transpose_nonzero_values);
        DenseVector<ValueType> aux_val_new_a(transpose_nonzero_values);

        // Count the number of entries of each row of the transpose
        #pragma omp parallel for
        for (int i=0; i<static_cast<int>(size_system_1); ++i) {
            IndexType row_begin = index1[i];
            IndexType row_end   = index1[i+1];

            for (IndexType j=row_begin; j<row_end; j++) {
                #pragma omp atomic
                new_a_ptr[index2[j] + 1] += 1;
            }
        }

        // We initialize the blocks sparse matrix
        std::partial_sum(new_a_ptr.begin(), new_a_ptr.end(), &new_a_ptr[0]);

        IndexVectorType aux_indexes(size_system_2);
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(size_system_2); ++i)
            aux_indexes[i] = 0;

        // NOTE: This loop is kept serial, since the writes through aux_indexes would race otherwise
        // #pragma omp parallel for
        for (int i=0; i<static_cast<int>(size_system_1); ++i) {
            IndexType row_begin = index1[i];
            IndexType row_end   = index1[i+1];

            for (IndexType j=row_begin; j<row_end; j++) {
                const IndexType current_row = index2[j];
                const IndexType initial_position = new_a_ptr[current_row];

                const IndexType current_index = initial_position + aux_indexes[current_row];
                aux_index2_new_a[current_index] = i;
                aux_val_new_a[current_index] = Factor * data[j];

                // #pragma omp atomic
                aux_indexes[current_row] += 1;
            }
        }

        // We reorder the rows
        SortRows(&new_a_ptr[0], size_system_2, size_system_1, &aux_index2_new_a[0], &aux_val_new_a[0]);

        // We fill the matrix
        CreateSolutionMatrix(rA, size_system_2, size_system_1, &new_a_ptr[0], &aux_index2_new_a[0], &aux_val_new_a[0]);
    }
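    // Illustrative sketch (not part of the original header): building the
    // scaled transpose rA = -rB^T. Matrix contents are assumptions:
    //
    //     boost::numeric::ublas::compressed_matrix<double> B(2, 3, 2), BT;
    //     B(0, 2) = 4.0; B(1, 0) = 5.0;
    //     SparseMatrixMultiplicationUtility::TransposeMatrix(BT, B, -1.0);
    //     // BT is 3x2 with BT(2,0) = -4.0 and BT(0,1) = -5.0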
    /**
     * @brief This method is designed to create the final solution sparse matrix from the auxiliary values
     * @param C The matrix solution
     * @param NRows The number of rows of the matrix
     * @param NCols The number of columns of the matrix
     * @param CPtr The row pointers, indicating the accumulated number of nonzero values of each row
     * @param AuxIndex2C The indexes of the nonzero columns
     * @param AuxValC The C array containing the values of the sparse matrix
     */
    template <class CMatrix, typename TSize, typename Ptr, typename IndexType, typename ValueType>
    static inline void CreateSolutionMatrix(
        CMatrix& C,
        const TSize NRows,
        const TSize NCols,
        const Ptr* CPtr,
        const IndexType* AuxIndex2C,
        const ValueType* AuxValC
        )
    {
        // Exiting just in case of empty matrix
        if ((NRows == 0) || (NCols == 0))
            return;

        // Auxiliary values
        const TSize nonzero_values = CPtr[NRows];

        C = CMatrix(NRows, NCols, nonzero_values);
        IndexType* index1_c = C.index1_data().begin();
        IndexType* index2_c = C.index2_data().begin();
        double* values_c = C.value_data().begin();

        index1_c[0] = 0;
        for (TSize i = 0; i < NRows; i++)
            index1_c[i+1] = index1_c[i] + (CPtr[i+1] - CPtr[i]);

        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(nonzero_values); i++) {
            KRATOS_DEBUG_ERROR_IF(AuxIndex2C[i] > static_cast<IndexType>(NCols)) << "Index " << AuxIndex2C[i] << " is greater than the number of columns " << NCols << std::endl;
            index2_c[i] = AuxIndex2C[i];
            values_c[i] = AuxValC[i];
        }

        C.set_filled(NRows+1, nonzero_values);
    }

    /**
     * @brief This method is designed to reorder the rows by columns
     * @param CPtr The row pointers, indicating the accumulated number of nonzero values of each row
     * @param NRows The number of rows of the matrix
     * @param NCols The number of columns of the matrix
     * @param Columns The columns of the problem
     * @param Values The values (to be ordered with the rows)
     */
    template <typename TSize, typename Col, typename TIndexType, typename ValueType>
    static inline void SortRows(
        const TIndexType* CPtr,
        const TSize NRows,
        const TSize NCols,
        Col* Columns,
        ValueType* Values
        )
    {
        #pragma omp parallel
        {
            // Insertion sort of each row by column index, keeping the values aligned
            #pragma omp for
            for (int i_row=0; i_row<static_cast<int>(NRows); i_row++) {
                const TIndexType row_beg = CPtr[i_row];
                const TIndexType row_end = CPtr[i_row + 1];

                for(IndexType j = 1; j < row_end - row_beg; ++j) {
                    const IndexType c = Columns[j + row_beg];
                    const ValueType v = Values[j + row_beg];

                    SignedIndexType i = j - 1;
                    while(i >= 0 && Columns[i + row_beg] > c) {
                        KRATOS_DEBUG_ERROR_IF(Columns[i + row_beg] > static_cast<Col>(NCols)) << " Index for column: " << i + row_beg << ". Index " << Columns[i + row_beg] << " is greater than the number of columns " << NCols << std::endl;
                        Columns[i + 1 + row_beg] = Columns[i + row_beg];
                        Values[i + 1 + row_beg] = Values[i + row_beg];
                        i--;
                    }

                    Columns[i + 1 + row_beg] = c;
                    Values[i + 1 + row_beg] = v;
                }
            }
        }
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const
    {
        return "SparseMatrixMultiplicationUtility";
    }

    /// Print information about this object.
    void PrintInfo (std::ostream& rOStream) const
    {
        rOStream << "SparseMatrixMultiplicationUtility";
    }

    /// Print object's data.
    void PrintData (std::ostream& rOStream) const
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This method merges two sorted rows of column indexes (set union)
     * @details When TNeedOut is false only the width of the merged row is computed and nothing is written to Column3
     * @param Column1 The begin iterator of the first row
     * @param Column1End The end iterator of the first row
     * @param Column2 The begin iterator of the second row
     * @param Column2End The end iterator of the second row
     * @param Column3 The output row
     * @return The end of the resulting row
     */
    template <bool TNeedOut, class TIndex>
    static TIndex* MergeRows(
        const TIndex* Column1,
        const TIndex* Column1End,
        const TIndex* Column2,
        const TIndex* Column2End,
        TIndex* Column3
        )
    {
        while(Column1 != Column1End && Column2 != Column2End) {
            TIndex c1 = *Column1;
            TIndex c2 = *Column2;

            if (c1 < c2) {
                if (TNeedOut) *Column3 = c1;
                ++Column1;
            } else if (c1 == c2) {
                if (TNeedOut) *Column3 = c1;
                ++Column1;
                ++Column2;
            } else {
                if (TNeedOut) *Column3 = c2;
                ++Column2;
            }
            ++Column3;
        }

        if (TNeedOut) {
            if (Column1 < Column1End) {
                return std::copy(Column1, Column1End, Column3);
            } else if (Column2 < Column2End) {
                return std::copy(Column2, Column2End, Column3);
            } else {
                return Column3;
            }
        } else {
            return Column3 + (Column1End - Column1) + (Column2End - Column2);
        }
    }
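    // Worked toy example (arrays are hypothetical): merging the sorted column
    // index rows {0, 2, 5} and {1, 2, 7} yields {0, 1, 2, 5, 7}; the shared
    // index 2 is emitted once. With TNeedOut = false the same call only
    // reports the merged width (5) without writing the output row:
    //
    //     std::size_t r1[] = {0, 2, 5}, r2[] = {1, 2, 7}, out[6];
    //     std::size_t* end = MergeRows<true>(r1, r1 + 3, r2, r2 + 3, out);
    //     // end - out == 5 and out == {0, 1, 2, 5, 7}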
    /**
     * @brief This method merges two sorted rows with values, computing rAlpha1*row1 + rAlpha2*row2
     * @param rAlpha1 The coefficient of the first row
     * @param Column1 The begin iterator of the first row
     * @param Column1End The end iterator of the first row
     * @param Value1 The values of the first row
     * @param rAlpha2 The coefficient of the second row
     * @param Column2 The begin iterator of the second row
     * @param Column2End The end iterator of the second row
     * @param Value2 The values of the second row
     * @param Column3 The output column indexes
     * @param Value3 The output values
     * @return The end of the resulting row
     */
    template <class TIndex, class TValueType>
    static TIndex* MergeRows(
        const TValueType &rAlpha1,
        const TIndex* Column1,
        const TIndex* Column1End,
        const TValueType *Value1,
        const TValueType &rAlpha2,
        const TIndex* Column2,
        const TIndex* Column2End,
        const TValueType *Value2,
        TIndex* Column3,
        TValueType *Value3
        )
    {
        while(Column1 != Column1End && Column2 != Column2End) {
            TIndex c1 = *Column1;
            TIndex c2 = *Column2;

            if (c1 < c2) {
                ++Column1;

                *Column3 = c1;
                *Value3 = rAlpha1 * (*Value1++);
            } else if (c1 == c2) {
                ++Column1;
                ++Column2;

                *Column3 = c1;
                *Value3 = rAlpha1 * (*Value1++) + rAlpha2 * (*Value2++);
            } else {
                ++Column2;

                *Column3 = c2;
                *Value3 = rAlpha2 * (*Value2++);
            }

            ++Column3;
            ++Value3;
        }

        while(Column1 < Column1End) {
            *Column3++ = *Column1++;
            *Value3++ = rAlpha1 * (*Value1++);
        }

        while(Column2 < Column2End) {
            *Column3++ = *Column2++;
            *Value3++ = rAlpha2 * (*Value2++);
        }

        return Column3;
    }
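    // Worked toy example (values are hypothetical): with rAlpha1 = 2.0,
    // row1 = {(0, 1.0), (2, 3.0)} and rAlpha2 = 1.0, row2 = {(2, 4.0)}, the
    // merged row is {(0, 2.0), (2, 10.0)}: column 0 contributes 2*1.0 and the
    // shared column 2 contributes 2*3.0 + 1*4.0:
    //
    //     std::size_t c1[] = {0, 2}, c2[] = {2}, c3[3];
    //     double v1[] = {1.0, 3.0}, v2[] = {4.0}, v3[3];
    //     MergeRows(2.0, c1, c1 + 2, v1, 1.0, c2, c2 + 1, v2, c3, v3);
    //     // c3 == {0, 2}, v3 == {2.0, 10.0}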
    /**
     * @brief This method computes the width (number of nonzeros) of the product of one row of the first matrix with the second matrix
     * @param AColumn The begin iterator of the row of the first matrix
     * @param AColumnEnd The end iterator of the row of the first matrix
     * @param BPtr The row pointer array of the second matrix
     * @param BColumn The column indexes of the second matrix
     * @param Tmp1Column Scratch buffer for the columns of the first partial merge
     * @param Tmp2Column Scratch buffer for the columns of the second partial merge
     * @param Tmp3Column Scratch buffer for the columns of the third partial merge
     * @return The resulting row width
     */
    template <class TIndex>
    static TIndex ProdRowWidth(
        const TIndex* AColumn,
        const TIndex* AColumnEnd,
        const TIndex* BPtr,
        const TIndex* BColumn,
        TIndex* Tmp1Column,
        TIndex* Tmp2Column,
        TIndex* Tmp3Column
        )
    {
        const TIndex nrow = AColumnEnd - AColumn;

        /* No rows to merge, nothing to do */
        if (nrow == 0) return 0;

        /* Single row, just copy it to output */
        if (nrow == 1) return BPtr[*AColumn + 1] - BPtr[*AColumn];

        /* Two rows, merge them */
        if (nrow == 2) {
            const TIndex a1 = AColumn[0];
            const TIndex a2 = AColumn[1];

            return MergeRows<false>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column) - Tmp1Column;
        }

        /* Generic case (more than two rows).
         *
         * Merge rows by pairs, then merge the results together.
         * When merging two rows, the result is always wider (or equal).
         * Merging by pairs allows to work with short rows as often as possible.
         */
        // Merge first two.
        TIndex a1 = *AColumn++;
        TIndex a2 = *AColumn++;
        TIndex c_col1 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column ) - Tmp1Column;

        // Go by pairs.
        while(AColumn + 1 < AColumnEnd) {
            a1 = *AColumn++;
            a2 = *AColumn++;

            TIndex c_col2 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;

            if (AColumn == AColumnEnd) {
                return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;
            } else {
                c_col1 = MergeRows<true>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;

                std::swap(Tmp1Column, Tmp3Column);
            }
        }

        // Merge the tail.
        a2 = *AColumn;

        return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;
    }
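    // Worked toy example (CSR arrays below are hypothetical): for a row of A
    // whose nonzero columns point to B rows {0,2}, {1,2} and {5}, the pairwise
    // strategy merges {0,2} with {1,2} into {0,1,2}, then merges in the tail
    // {5}, reporting a width of 4 without touching any values. ProdRow (below)
    // performs the same pairwise merging but also accumulates the values:
    //
    //     // CSR of a 6x6 B with rows b0 = {0,2}, b1 = {1,2}, b5 = {5}
    //     std::size_t b_ptr[] = {0, 2, 4, 4, 4, 4, 5};
    //     std::size_t b_col[] = {0, 2, 1, 2, 5};
    //     std::size_t a_cols[] = {0, 1, 5};
    //     std::size_t t1[8], t2[8], t3[8];
    //     std::size_t w = ProdRowWidth(a_cols, a_cols + 3, b_ptr, b_col, t1, t2, t3);
    //     // w == 4, i.e. the product row has columns {0, 1, 2, 5}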
    /**
     * @brief This method computes one row of the product of the first matrix with the second matrix
     * @param AColumn The begin iterator of the row of the first matrix
     * @param AColumnEnd The end iterator of the row of the first matrix
     * @param AValue The values of the row of the first matrix
     * @param BPtr The row pointer array of the second matrix
     * @param BColumn The column indexes of the second matrix
     * @param BValue The values of the second matrix
     * @param OutColumn The column indexes of the output row
     * @param OutValue The values of the output row
     * @param Tmp2Column Scratch buffer for the columns of the second partial merge
     * @param Tmp2Value Scratch buffer for the values of the second partial merge
     * @param Tmp3Column Scratch buffer for the columns of the third partial merge
     * @param Tmp3Value Scratch buffer for the values of the third partial merge
     */
    template <class TIndex, class TValueType>
    static void ProdRow(
        const TIndex* AColumn,
        const TIndex* AColumnEnd,
        const TValueType *AValue,
        const TIndex* BPtr,
        const TIndex* BColumn,
        const TValueType *BValue,
        TIndex* OutColumn,
        TValueType *OutValue,
        TIndex* Tmp2Column,
        TValueType *Tmp2Value,
        TIndex* Tmp3Column,
        TValueType *Tmp3Value
        )
    {
        const TIndex nrow = AColumnEnd - AColumn;

        /* No rows to merge, nothing to do */
        if (nrow == 0) return;

        /* Single row, just copy it to output */
        if (nrow == 1) {
            TIndex ac = *AColumn;
            TValueType av = *AValue;

            const TValueType *bv = BValue + BPtr[ac];
            const TIndex* bc = BColumn + BPtr[ac];
            const TIndex* be = BColumn + BPtr[ac+1];

            while(bc != be) {
                *OutColumn++ = *bc++;
                *OutValue++ = av * (*bv++);
            }

            return;
        }

        /* Two rows, merge them */
        if (nrow == 2) {
            TIndex ac1 = AColumn[0];
            TIndex ac2 = AColumn[1];

            TValueType av1 = AValue[0];
            TValueType av2 = AValue[1];

            MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], OutColumn, OutValue );

            // NOTE: Without this return the generic case below would run again and corrupt the output
            return;
        }

        /* Generic case (more than two rows).
         *
         * Merge rows by pairs, then merge the results together.
         * When merging two rows, the result is always wider (or equal).
         * Merging by pairs allows to work with short rows as often as possible.
         */
        // Merge first two.
        TIndex ac1 = *AColumn++;
        TIndex ac2 = *AColumn++;

        TValueType av1 = *AValue++;
        TValueType av2 = *AValue++;

        TIndex* tm1_col = OutColumn;
        TValueType *tm1_val = OutValue;

        TIndex c_col1 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], tm1_col, tm1_val ) - tm1_col;

        // Go by pairs.
        while(AColumn + 1 < AColumnEnd) {
            ac1 = *AColumn++;
            ac2 = *AColumn++;

            av1 = *AValue++;
            av2 = *AValue++;

            TIndex c_col2 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp2Column, Tmp2Value ) - Tmp2Column;

            c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, amgcl::math::identity<TValueType>(), Tmp2Column, Tmp2Column + c_col2, Tmp2Value, Tmp3Column, Tmp3Value ) - Tmp3Column;

            std::swap(Tmp3Column, tm1_col);
            std::swap(Tmp3Value, tm1_val);
        }

        // Merge the tail if there is one.
        if (AColumn < AColumnEnd) {
            ac2 = *AColumn++;
            av2 = *AValue++;

            c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp3Column, Tmp3Value ) - Tmp3Column;

            std::swap(Tmp3Column, tm1_col);
            std::swap(Tmp3Value, tm1_val);
        }

        // If we are lucky, tm1 now points to out.
        // Otherwise, copy the results.
        if (tm1_col != OutColumn) {
            std::copy(tm1_col, tm1_col + c_col1, OutColumn);
            std::copy(tm1_val, tm1_val + c_col1, OutValue);
        }
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; // Class SparseMatrixMultiplicationUtility

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

// /****************************** INPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::istream& operator >> (std::istream& rIStream,
//                                   SparseMatrixMultiplicationUtility& rThis);
//
// /***************************** OUTPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::ostream& operator << (std::ostream& rOStream,
//                                   const SparseMatrixMultiplicationUtility& rThis)
// {
//     return rOStream;
// }

///@}

}  // namespace Kratos.

#endif // KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED defined