source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_subassign_03.c
//------------------------------------------------------------------------------
// GB_subassign_03: C(I,J) += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 03: C(I,J) += scalar ; using S

// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: constructed

// C is not bitmap: use GB_bitmap_assign instead

// Overview: S = C(I,J) is built symbolically (S holds positions in C, not
// values).  Every (i,j) position in IxJ is then visited: where S has an entry,
// the corresponding C entry is updated in place via the accum operator (a
// zombie there is brought back to life); where S has no entry, a pending
// tuple C(iC,jC) = scalar is queued for later assembly.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_03
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    // GB_EMPTY_TASKLIST declares the task list and the locals used by the
    // macros below (nthreads, ntasks, taskid, nzombies, ...).
    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;      // C must not be bitmap
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 03: C(I,J) += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.

    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.

    // Method 01 and Method 03 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    // Slice IxJ into tasks; each task owns a contiguous iA range per vector.
    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            // Sets pS / pS_end to the range of S entries in this iA window.
            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                // S is sorted by row index, so walking pS in lockstep with iA
                // finds each S(iA,j) in O(1) amortized time.
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    // Phase 1 only counts; the tuple is inserted in phase 2.
                    task_pending++ ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =C+A ): apply accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_withaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // Cumulative sum of per-task pending counts gives each task its slot
    // range in the global pending-tuple list.
    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                // Same lockstep scan as phase 1; the two phases must visit
                // the positions in the identical order.
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    // already handled in phase 1; just advance through S
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
magsac.h
#pragma once

#include <limits>
#include <chrono>
#include <memory>
#include "model.h"
#include "model_score.h"
#include "sampler.h"
#include "uniform_sampler.h"
#include <math.h>
#include "gamma_values.cpp"

#ifdef _WIN32
	#include <ppl.h>
#endif

// MAGSAC / MAGSAC++ robust model estimator (RANSAC-family).  Instead of a
// fixed inlier-outlier threshold it marginalizes the model score over the
// noise scale sigma, up to a user-set maximum threshold.
template <class DatumType, class ModelEstimator>
class MAGSAC
{
public:
	enum Version {
		// The original version of MAGSAC. It works well, however, can be quite slow in many cases.
		MAGSAC_ORIGINAL,
		// The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster.
		MAGSAC_PLUS_PLUS };

	MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) :
		time_limit(std::numeric_limits<double>::max()),
		// NOTE(review): in the dumped source this initializer appeared after a
		// stray "//"; it is kept active here since run() reads desired_fps —
		// confirm against the upstream header.
		desired_fps(-1),
		iteration_limit(std::numeric_limits<size_t>::max()),
		maximum_threshold(10.0),
		apply_post_processing(true),
		mininum_iteration_number(50),
		partition_number(5),
		core_number(1),
		number_of_irwls_iters(1),
		interrupting_threshold(1.0),
		last_iteration_number(0),
		log_confidence(0),
		point_number(0),
		magsac_version(magsac_version_)
	{
	}

	~MAGSAC() {}

	// A function to run MAGSAC.
	bool run(
		const cv::Mat &points_, // The input data points
		const double confidence_, // The required confidence in the results
		ModelEstimator& estimator_, // The model estimator
		gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used
		gcransac::Model &obtained_model_, // The estimated model parameters
		int &iteration_number_, // The number of iterations done
		ModelScore &model_score_); // The score of the estimated model

	// A function to set the maximum inlier-outlier threshold
	void setMaximumThreshold(const double maximum_threshold_)
	{
		maximum_threshold = maximum_threshold_;
	}

	// A function to set the inlier-outlier threshold used for speeding up the procedure
	// and for determining the required number of iterations.
	void setReferenceThreshold(const double threshold_)
	{
		interrupting_threshold = threshold_;
	}

	double getReferenceThreshold()
	{
		return interrupting_threshold;
	}

	// Setting the flag determining if post-processing is needed
	void applyPostProcessing(bool value_)
	{
		apply_post_processing = value_;
	}

	// A function to set the maximum number of iterations
	void setIterationLimit(size_t iteration_limit_)
	{
		iteration_limit = iteration_limit_;
	}

	// A function to set the minimum number of iterations
	void setMinimumIterationNumber(size_t mininum_iteration_number_)
	{
		mininum_iteration_number = mininum_iteration_number_;
	}

	// A function to set the number of cores used in the original MAGSAC algorithm.
	// In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel,
	// it is beneficial to keep the core number one for each independent MAGSAC.
	// Otherwise, the threads will act weirdly.
	void setCoreNumber(size_t core_number_)
	{
		if (magsac_version == MAGSAC_PLUS_PLUS)
			fprintf(stderr, "Setting the core number for MAGSAC++ is deprecated.");
		core_number = core_number_;
	}

	// Setting the number of partitions used in the original MAGSAC algorithm
	// to speed up the procedure. In MAGSAC++, this parameter is not used.
	void setPartitionNumber(size_t partition_number_)
	{
		if (magsac_version == MAGSAC_PLUS_PLUS)
			fprintf(stderr, "Setting the partition number for MAGSAC++ is deprecated.");
		partition_number = partition_number_;
	}

	// A function to set a desired minimum frames-per-second (FPS) value.
	void setFPS(int fps_)
	{
		desired_fps = fps_; // The required FPS.
		// The time limit which the FPS implies
		time_limit = fps_ <= 0 ?
			std::numeric_limits<double>::max() :
			1.0 / fps_;
	}

	// The post-processing algorithm applying sigma-consensus to the input model once.
	bool postProcessing(
		const cv::Mat &points, // All data points
		const gcransac::Model &so_far_the_best_model, // The input model to be improved
		gcransac::Model &output_model, // The improved model parameters
		ModelScore &output_score, // The score of the improved model
		const ModelEstimator &estimator); // The model estimator

	// The function determining the quality/score of a model using the original MAGSAC
	// criterion. Note that this function is significantly slower than the quality
	// function of MAGSAC++.
	void getModelQuality(
		const cv::Mat& points_, // All data points
		const gcransac::Model& model_, // The input model
		const ModelEstimator& estimator_, // The model estimator
		double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale
		double& score_); // The score/quality of the model

	// The function determining the quality/score of a
	// model using the MAGSAC++ criterion.
	void getModelQualityPlusPlus(
		const cv::Mat &points_, // All data points
		const gcransac::Model &model_, // The model parameter
		const ModelEstimator &estimator_, // The model estimator class
		double &score_, // The score to be calculated
		const double &previous_best_score_); // The score of the previous so-far-the-best model

	// Number of iteratively re-weighted least-squares iterations (public knob).
	size_t number_of_irwls_iters;

protected:
	Version magsac_version; // The version of MAGSAC used
	size_t iteration_limit; // Maximum number of iterations allowed
	size_t mininum_iteration_number; // Minimum number of iteration before terminating
	double maximum_threshold; // The maximum sigma value
	size_t core_number; // Number of core used in sigma-consensus
	double time_limit; // A time limit after the algorithm is interrupted
	int desired_fps; // The desired FPS (TODO: not tested with MAGSAC)
	bool apply_post_processing; // Decides if the post-processing step should be applied
	int point_number; // The current point number
	int last_iteration_number; // The iteration number implied by the last run of sigma-consensus
	double log_confidence; // The logarithm of the required confidence
	size_t partition_number; // Number of partitions used to speed up sigma-consensus
	double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model

	// Sigma-consensus model polishing for the original MAGSAC criterion.
	bool sigmaConsensus(
		const cv::Mat& points_,
		const gcransac::Model& model_,
		gcransac::Model& refined_model_,
		ModelScore& score_,
		const ModelEstimator& estimator_,
		const ModelScore& best_score_);

	// Sigma-consensus model polishing for the MAGSAC++ criterion.
	bool sigmaConsensusPlusPlus(
		const cv::Mat &points_,
		const gcransac::Model& model_,
		gcransac::Model& refined_model_,
		ModelScore &score_,
		const ModelEstimator &estimator_,
		const ModelScore &best_score_);
};

// The main robust estimation loop: sample minimal subsets, fit candidate
// models, polish each with sigma-consensus, and keep the best-scoring one
// until the adaptively-updated iteration budget (or the time limit) runs out.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::run(
	const cv::Mat& points_,
	const double confidence_,
	ModelEstimator& estimator_,
	gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_,
	gcransac::Model& obtained_model_,
	int& iteration_number_,
	ModelScore &model_score_)
{
	// Initialize variables
	std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times
	std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time
	log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence
	point_number = points_.rows; // Number of points
	const int sample_size = estimator_.sampleSize(); // The sample size required for the estimation
	size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit
	int iteration = 0; // Current number of iterations
	gcransac::Model so_far_the_best_model; // Current best model
	ModelScore so_far_the_best_score; // The score of the current best model
	std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation

	// The pool of point indices the sampler draws from (identity mapping).
	std::vector<size_t> pool(points_.rows);
	// NOTE(review): size_t point_idx is compared against the int point_number
	// (signed/unsigned mismatch); harmless for non-negative counts but worth
	// unifying the types.
	for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
		pool[point_idx] = point_idx;

	if (points_.rows < sample_size)
	{
		fprintf(stderr, "There are not enough points for applying robust estimation. Minimum is %d; while %d are given.\n",
			sample_size, points_.rows);
		return false;
	}

	// Set the start time variable if there is some time limit set
	if (desired_fps > -1)
		start = std::chrono::system_clock::now();

	// Give up on a candidate after this many failed sample/estimate attempts.
	constexpr size_t max_unsuccessful_model_generations = 50;

	// Main MAGSAC iteration
	// NOTE(review): int iteration is compared with size_t bounds here
	// (signed/unsigned); fine while iteration stays non-negative.
	while (mininum_iteration_number > iteration ||
		iteration < max_iteration)
	{
		// Increase the current iteration number
		++iteration;

		// Sample a minimal subset
		std::vector<gcransac::Model> models; // The set of estimated models
		size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations
		// Try to select a minimal sample and estimate the implied model parameters
		while (++unsuccessful_model_generations < max_unsuccessful_model_generations)
		{
			// Get a minimal sample randomly
			if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected
				minimal_sample.get(), // The minimal sample
				sample_size)) // The size of a minimal sample
				continue;

			// Check if the selected sample is valid before estimating the model
			// parameters which usually takes more time.
			if (!estimator_.isValidSample(points_, // All points
				minimal_sample.get())) // The current sample
				continue;

			// Estimate the model from the minimal sample
			if (estimator_.estimateModel(points_, // All data points
				minimal_sample.get(), // The selected minimal sample
				&models)) // The estimated models
				break;
		}

		// If the method was not able to generate any usable models, break the cycle.
		// Failed generation attempts are charged against the iteration budget.
		iteration += unsuccessful_model_generations - 1;

		// Select the so-far-the-best from the estimated models
		for (const auto &model : models)
		{
			ModelScore score; // The score of the current model
			gcransac::Model refined_model; // The refined model parameters

			// Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma
			bool success;
			if (magsac_version == Version::MAGSAC_ORIGINAL)
				success = sigmaConsensus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);
			else
				success = sigmaConsensusPlusPlus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);

			// Continue if the model was rejected
			if (!success || score.score == -1)
				continue;

			// Save the iteration number when the current model is found
			score.iteration = iteration;

			// Update the best model parameters if needed
			if (so_far_the_best_score < score)
			{
				so_far_the_best_model = refined_model; // Update the best model parameters
				so_far_the_best_score = score; // Update the best model's score
				max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase
			}
		}

		// Update the time parameters if a time limit is set
		if (desired_fps > -1)
		{
			end = std::chrono::system_clock::now();
			elapsed_seconds = end - start;

			// Interrupt if the time limit is exceeded
			if (elapsed_seconds.count() > time_limit)
				break;
		}
	}

	// Apply sigma-consensus as a post processing step if needed and the estimated model is valid
	if (apply_post_processing)
	{
		// TODO
	}

	obtained_model_ = so_far_the_best_model;
	iteration_number_ = iteration;
	model_score_ = so_far_the_best_score;

	// Success iff at least one model scored positively.
	return so_far_the_best_score.score > 0;
}

// Post-processing stub: intentionally unimplemented; always reports failure.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::postProcessing(
	const cv::Mat &points_,
	const gcransac::Model &model_,
	gcransac::Model &refined_model_,
	ModelScore &refined_score_,
	const ModelEstimator &estimator_)
{
	fprintf(stderr, "Sigma-consensus++ is not implemented yet as post-processing.\n");
	return false;
}

// Original-MAGSAC sigma-consensus: partitions the sigma range, fits a model
// per partition from the points within that sigma, accumulates per-point
// Gaussian weights across partitions, then does one weighted least-squares
// fit and scores the polished model with getModelQuality().
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus(
	const cv::Mat &points_,
	const gcransac::Model& model_,
	gcransac::Model& refined_model_,
	ModelScore &score_,
	const ModelEstimator &estimator_,
	const ModelScore &best_score_)
{
	// Set up the parameters
	// NOTE(review): L and threshold_to_sigma_multiplier are unused in this
	// function body as visible here.
	constexpr double L = 1.05;
	constexpr double k = ModelEstimator::getSigmaQuantile();
	constexpr double threshold_to_sigma_multiplier = 1.0 / k;
	constexpr size_t sample_size = estimator_.sampleSize();
	static auto comparator = [](std::pair<double, int> left, std::pair<double, int> right) { return left.first < right.first; };
	const int point_number = points_.rows;
	double current_maximum_sigma = this->maximum_threshold;

	// Calculating the residuals
	std::vector< std::pair<double, size_t> > all_residuals;
	all_residuals.reserve(point_number);

	// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
	if (best_score_.inlier_number > 0)
	{
		// Number of inliers which should be exceeded
		int points_remaining = best_score_.inlier_number;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				all_residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					--points_remaining;
			}

			// Interrupt if there is no chance of being better
			// TODO: replace this part by SPRT test
			if (point_number - point_idx < points_remaining)
				return false;
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = best_score_.inlier_number - points_remaining;
	}
	else
	{
		// The number of really close points
		size_t points_close = 0;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		// NOTE(review): size_t vs int comparison again (point_number is int).
		for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				all_residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					++points_close;
			}
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = points_close;
	}

	std::vector<gcransac::Model> sigma_models;
	std::vector<size_t> sigma_inliers;
	std::vector<double> final_weights;

	// The number of possible inliers
	const size_t possible_inlier_number = all_residuals.size();

	// Sort the residuals in ascending order
	std::sort(all_residuals.begin(), all_residuals.end(), comparator);

	// The maximum threshold is set to be slightly bigger than the distance of the
	// farthest possible inlier.
	// NOTE(review): all_residuals.back() is undefined if no point fell below
	// the maximum sigma — presumably callers guarantee at least one; confirm.
	current_maximum_sigma =
		all_residuals.back().first + std::numeric_limits<double>::epsilon();

	const double sigma_step = current_maximum_sigma / partition_number;

	last_iteration_number = 10000;

	score_.score = 0;

	// The weights calculated by each parallel process
	std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0));

	// If OpenMP is used, calculate things in parallel
#ifdef USE_OPENMP
#pragma omp parallel for num_threads(core_number)
	for (int partition_idx = 0; partition_idx < partition_number; ++partition_idx)
	{
		// The maximum sigma value in the current partition
		const double max_sigma = (partition_idx + 1) * sigma_step;

		// Find the last element which has smaller distance than 'max_threshold'
		// Since the vector is ordered binary search can be used to find that particular element.
		const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator);
		const size_t sigma_inlier_number = last_element - all_residuals.begin();

		// Put the indices into a vector
		std::vector<size_t> sigma_inliers;
		sigma_inliers.reserve(sigma_inlier_number);

		// Store the points which are closer than the current sigma limit
		for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx)
			sigma_inliers.emplace_back(all_residuals[relative_point_idx].second);

		// Check if there are enough inliers to fit a model
		if (sigma_inliers.size() > sample_size)
		{
			// Estimating the model which the current set of inliers imply
			std::vector<gcransac::Model> sigma_models;
			estimator_.estimateModelNonminimal(points_,
				&(sigma_inliers)[0],
				sigma_inlier_number,
				&sigma_models);

			// If the estimation was successful calculate the implied probabilities
			if (sigma_models.size() == 1)
			{
				const double max_sigma_squared_2 = 2 * max_sigma * max_sigma;
				double residual_i_2, // The residual of the i-th point
					probability_i; // The probability of the i-th point

				// Iterate through all points to estimate the related probabilities
				for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx)
				{
					// TODO: Replace with Chi-square instead of normal distribution
					const size_t &point_idx = sigma_inliers[relative_point_idx];

					// Calculate the residual of the current point
					residual_i_2 = estimator_.squaredResidual(points_.row(point_idx),
						sigma_models[0]);

					// Calculate the probability of the i-th point assuming Gaussian distribution
					// TODO: replace by Chi-square distribution
					probability_i = exp(-residual_i_2 / max_sigma_squared_2);

					// Store the probability of the i-th point coming from the current partition
					point_weights_par[partition_idx][relative_point_idx] += probability_i;
				}
			}
		}
	}
#else
	fprintf(stderr, "Not implemented yet.\n");
#endif

	// The weights used for the final weighted least-squares fitting
	final_weights.reserve(possible_inlier_number);

	// Collect all points which has higher probability of being inlier than zero
	sigma_inliers.reserve(possible_inlier_number);
	for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
	{
		// Calculate the weight of the current point
		double weight = 0.0;
		for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx)
			weight += point_weights_par[partition_idx][point_idx];

		// If the weight is approx. zero, continue.
		if (weight < std::numeric_limits<double>::epsilon())
			continue;

		// Store the index and weight of the current point
		sigma_inliers.emplace_back(all_residuals[point_idx].second);
		final_weights.emplace_back(weight);
	}

	// If there are fewer inliers than the size of the minimal sample interupt the procedure
	if (sigma_inliers.size() < sample_size)
		return false;

	// Estimate the model parameters using weighted least-squares fitting
	if (!estimator_.estimateModelNonminimal(
		points_, // All input points
		&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
		static_cast<int>(sigma_inliers.size()), // Number of possible inliers
		&sigma_models, // Estimated models
		&(final_weights)[0])) // Weights of points
		return false;

	bool is_model_updated = false;

	if (sigma_models.size() == 1 && // If only a single model is estimated
		estimator_.isValidModel(sigma_models.back(),
			points_,
			sigma_inliers,
			&(sigma_inliers)[0],
			interrupting_threshold,
			is_model_updated)) // and it is valid
	{
		// Return the refined model
		refined_model_ = sigma_models.back();

		// Calculate the score of the model and the implied iteration number
		double marginalized_iteration_number;
		getModelQuality(points_, // All the input points
			refined_model_, // The estimated model
			estimator_, // The estimator
			marginalized_iteration_number, // The marginalized inlier ratio
			score_.score); // The marginalized score

		if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number))
			last_iteration_number = std::numeric_limits<int>::max();
		else
			last_iteration_number = static_cast<int>(round(marginalized_iteration_number));
		return true;
	}
	return false;
}

// MAGSAC++ sigma-consensus (definition continues beyond this chunk of the
// file; the body below is the visible prefix only).
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus(
	const cv::Mat &points_,
	const gcransac::Model& model_,
	gcransac::Model& refined_model_,
	ModelScore &score_,
	const ModelEstimator &estimator_,
	const ModelScore &best_score_)
{
	// The degrees of freedom of the data from which the model is estimated.
	// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
	constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
	// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
	constexpr double k = ModelEstimator::getSigmaQuantile();
	// A multiplier to convert residual values to sigmas
	constexpr double threshold_to_sigma_multiplier = 1.0 / k;
	// Calculating k^2 / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double squared_k_per_2 = k * k / 2.0;
	// Calculating (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
	// TODO: check
	constexpr double C = ModelEstimator::getC();
	// The size of a minimal sample used for the estimation
	constexpr size_t sample_size = estimator_.sampleSize();
	// Calculating 2^(DoF - 1) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two);
	// Calculating C * 2^(DoF - 1) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double C_times_two_ad_dof = C * two_ad_dof;
	// Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double gamma_value = tgamma(dof_minus_one_per_two);
	// Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2.
	constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK();
	// Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double gamma_difference = gamma_value - gamma_k;
	// The number of points provided
	const int point_number = points_.rows;
	// The manually set maximum inlier-outlier threshold
	double current_maximum_sigma = this->maximum_threshold;
	// Calculating the pairs of (residual, point index).
	std::vector< std::pair<double, size_t> > residuals;
	// Occupy the maximum required memory to avoid doing it later.
	residuals.reserve(point_number);

	// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
	if (best_score_.inlier_number > 0)
	{
		// Number of points close to the previous so-far-the-best model.
		// This model should have more inliers.
		int points_remaining = best_score_.inlier_number;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));
				// all_residuals.emplace_back(std::make_pair(residual * threshold_to_sigma_multiplier, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					--points_remaining;
			}

			// Interrupt if there is no chance of being better
			// TODO: replace this part by SPRT test
			if (point_number - point_idx < points_remaining)
				return false;
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = best_score_.inlier_number - points_remaining;
	}
	else
	{
		// The number of really close points
		size_t points_close = 0;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					++points_close;
			}
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = points_close;
	}

	// Models fit by weighted least-squares fitting
	std::vector<gcransac::Model> sigma_models;
	// Points used in the weighted least-squares fitting
	std::vector<size_t> sigma_inliers;
	// Weights used in the the weighted least-squares fitting
	std::vector<double> sigma_weights;
	// Number of points considered in the fitting
	const size_t possible_inlier_number = residuals.size();
	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_inliers.reserve(possible_inlier_number);
	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_weights.reserve(possible_inlier_number);

	// Calculate 2 * \sigma_{max}^2 a priori
	const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0;
	// Divide C * 2^(DoF - 1) by \sigma_{max} a priori
	const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma;
	// Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori
	const double weight_zero = one_over_sigma * gamma_difference;

	// Initialize the polished model with the initial one
	gcransac::Model 
polished_model = model_; // A flag to determine if the initial model has been updated bool updated = false; // Do the iteratively re-weighted least squares fitting for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations) { // If the current iteration is not the first, the set of possibly inliers // (i.e., points closer than the maximum threshold) have to be recalculated. if (iterations > 0) { // The number of points close to the model size_t points_close = 0; // Remove everything from the residual vector residuals.clear(); // Collect the points which are closer than the maximum threshold for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), polished_model); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) ++points_close; } } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. 
score_.inlier_number = points_close; // Number of points closer than the threshold const size_t possible_inlier_number = residuals.size(); // Clear the inliers and weights sigma_inliers.clear(); sigma_weights.clear(); // Occupy the memory for the inliers and weights sigma_inliers.reserve(possible_inlier_number); sigma_weights.reserve(possible_inlier_number); } // Calculate the weight of each point for (size_t res_idx = 0; res_idx < residuals.size(); ++res_idx) { const std::pair<double, size_t> &pair = residuals[res_idx]; const double &residual = pair.first; const size_t &idx = pair.second; // The weight double weight = 0.0; // If the residual is ~0, the point fits perfectly and it is handled differently if (residual < std::numeric_limits<double>::epsilon()) weight = weight_zero; else { // Calculate the squared residual const double squared_residual = residual * residual; // Get the position of the gamma value in the lookup table size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2); // Put the index of the point into the vector of points used for the least squares fitting sigma_inliers.emplace_back(idx); // If the sought gamma value is not stored in the lookup, return the closest element if (stored_gamma_number < x) x = stored_gamma_number; // Calculate the weight of the point weight = one_over_sigma * (stored_gamma_values[x] - gamma_k); } // Store the weight of the point sigma_weights.emplace_back(weight); } // If there are fewer than the minimum point close to the model, // terminate. 
if (sigma_inliers.size() < sample_size) return false; // Estimate the model parameters using weighted least-squares fitting if (!estimator_.estimateModelNonminimal( points_, // All input points &(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier static_cast<int>(sigma_inliers.size()), // Number of possible inliers &sigma_models, // Estimated models &(sigma_weights)[0])) // Weights of points { // If the estimation failed and the iteration was never successfull, // terminate with failure. if (iterations == 0) return false; // Otherwise, if the iteration was successfull at least one, // simply break it. break; } // Update the model parameters polished_model = sigma_models[0]; // Clear the vector of models and keep only the best sigma_models.clear(); // The model has been updated updated = true; } bool is_model_updated = false; if (updated && // If the model has been updated estimator_.isValidModel(polished_model, points_, sigma_inliers, &(sigma_inliers[0]), interrupting_threshold, is_model_updated)) // and it is valid { // Return the refined model refined_model_ = polished_model; // Calculate the score of the model and the implied iteration number double marginalized_iteration_number; getModelQualityPlusPlus(points_, // All the input points refined_model_, // The estimated model estimator_, // The estimator score_.score, // The marginalized score best_score_.score); // The score of the previous so-far-the-best model // Update the iteration number last_iteration_number = log_confidence / log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size)); return true; } return false; } template <class DatumType, class ModelEstimator> void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &score_, // The score to be calculated const double 
&previous_best_score_) // The score of the previous so-far-the-best model { // The degrees of freedom of the data from which the model is estimated. // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4. constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom(); // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals constexpr double k = ModelEstimator::getSigmaQuantile(); // A multiplier to convert residual values to sigmas constexpr double threshold_to_sigma_multiplier = 1.0 / k; // Calculating k^2 / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double squared_k_per_2 = k * k / 2.0; // Calculating (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0; // Calculating (DoF + 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0; // TODO: check constexpr double C = 0.25; // Calculating 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two); // Calculating 2^(DoF + 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. 
static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two); // Calculate the gamma value of k constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK(); // Calculate the lower incomplete gamma value of k constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK(); // The number of points provided const int point_number = points_.rows; // The previous best loss const double previous_best_loss = 1.0 / previous_best_score_; // Convert the maximum threshold to a sigma value const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold; // Calculate the squared maximum sigma const double maximum_sigma_2 = maximum_sigma * maximum_sigma; // Calculate \sigma_{max}^2 / 2 const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0; // Calculate 2 * \sigma_{max}^2 const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0; // Calculate the loss implied by an outlier const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k; // Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma; // The loss which a point implies double loss = 0.0, // The total loss regarding the current model total_loss = 0.0; // Iterate through all points to calculate the implied loss for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residualForScoring(points_.row(point_idx), model_.descriptor); // If the residual is smaller than the maximum threshold, consider it outlier // and add the loss implied to the total loss. 
if (maximum_threshold < residual) loss = outlier_loss; else // Otherwise, consider the point inlier, and calculate the implied loss { // Calculate the squared residual const double squared_residual = residual * residual; // Divide the residual by the 2 * \sigma^2 const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2; // Get the position of the gamma value in the lookup table size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma); // If the sought gamma value is not stored in the lookup, return the closest element if (stored_incomplete_gamma_number < x) x = stored_incomplete_gamma_number; // Calculate the loss implied by the current point loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] + squared_residual / 4.0 * (stored_complete_gamma_values[x] - gamma_value_of_k); loss = loss * two_ad_dof_plus_one_per_maximum_sigma; } // Update the total loss total_loss += loss; // Break the validation if there is no chance of being better than the previous // so-far-the-best model. 
if (previous_best_loss < total_loss) break; } // Calculate the score of the model from the total loss score_ = 1.0 / total_loss; } template <class DatumType, class ModelEstimator> void MAGSAC<DatumType, ModelEstimator>::getModelQuality( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &marginalized_iteration_number_, // The marginalized iteration number to be calculated double &score_) // The score to be calculated { // Set up the parameters constexpr size_t sample_size = estimator_.sampleSize(); const size_t point_number = points_.rows; // Getting the inliers std::vector<std::pair<double, size_t>> all_residuals; all_residuals.reserve(point_number); double max_distance = 0; for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residualForScoring(points_.row(point_idx), model_.descriptor); // If the residual is smaller than the maximum threshold, add it to the set of possible inliers if (maximum_threshold > residual) { max_distance = MAX(max_distance, residual); all_residuals.emplace_back(std::make_pair(residual, point_idx)); } } // Set the maximum distance to be slightly bigger than that of the farthest possible inlier max_distance = max_distance + std::numeric_limits<double>::epsilon(); // Number of possible inliers const size_t possible_inlier_number = all_residuals.size(); // The extent of a partition const double threshold_step = max_distance / partition_number; // The maximum threshold considered in each partition std::vector<double> thresholds(partition_number); std::vector<double> thresholds_squared(partition_number); std::vector<double> thresholds_2_squared(partition_number); // Calculating the thresholds for each partition for (size_t i = 0; i < partition_number; ++i) { thresholds[i] = (i + 1) * threshold_step; thresholds_squared[i] = 
thresholds[i] * thresholds[i]; thresholds_2_squared[i] = 2 * thresholds_squared[i]; } double residual_i, // Residual of the i-th point residual_i_squared, // Squared residual of the i-th poin probability_i; // Probability of the i-th point given the model std::vector<double> inliers(partition_number, 0), // RANSAC score for each partition probabilities(partition_number, 1); // Probabilities for each partition for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx) { residual_i = all_residuals[point_idx].first; residual_i_squared = residual_i * residual_i; for (size_t i = 0; i < partition_number; ++i) { if (residual_i < thresholds[i]) { probability_i = 1.0 - residual_i_squared / thresholds_squared[i]; ++inliers[i]; probabilities[i] += probability_i; } } } score_ = 0; marginalized_iteration_number_ = 0.0; for (auto i = 0; i < partition_number; ++i) { score_ += probabilities[i]; marginalized_iteration_number_ += log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size)); } marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number; }
19_omp_first_priv_nested.c
// clang-format off // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst // REQUIRES: openmp // clang-format on #include "omp.h" extern void MPI_Send(void*, int); void func(int* x, int* e) { // firstprivate > every thread has a private copy of addr(!) x // check-inst: define {{.*}} @func // check-inst-NOT: call void @__typeart_alloc_stack #pragma omp parallel for firstprivate(x), shared(e) for (int i = 0; i < 10; ++i) { // Analysis should not filter x, but e... MPI_Send((void*)x, *e); } } void foo() { // check-inst: define {{.*}} @foo // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1) int x = 1; int y = 2; #pragma omp parallel { func(&x, &y); } } void func_other(int* x, int* e) { // firstprivate > every thread has a private copy of addr(!) x // check-inst: define {{.*}} @func_other // check-inst-NOT: call void @__typeart_alloc_stack #pragma omp parallel for firstprivate(x), shared(e) for (int i = 0; i < 10; ++i) { // Analysis should not filter x, but e... MPI_Send(x, *e); } MPI_Send(x, *e); } void bar(int x_other) { // check-inst: define {{.*}} @bar // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1) int x = x_other; int y = 2; #pragma omp parallel { func_other(&x, &y); } } // CHECK: TypeArtPass [Heap & Stack] // CHECK-NEXT: Malloc : 0 // CHECK-NEXT: Free : 0 // CHECK-NEXT: Alloca : 2 // CHECK-NEXT: Global : 0
valid.res5.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_28_28_64_1_1.h"
#include "gen_ukr_A4B2gemm_1_128_28_28_64_1_1.h"

// Auto-generated convolution/GEMM driver (1x128x28x28x64 problem).
// Phase 1: each thread repacks its slice of the weight tensor oriB into the
//          blocked layout B expected by the micro-kernels (8x8 AVX transpose,
//          16-wide filter panels).
// Phase 2 ("push button generated block"): a machine-generated 15-deep loop
//          nest tiled by Tc1/Tf2/Txy3 that dispatches 6x2- or 4x2-vector
//          scatter micro-kernels per (filter, spatial) tile.
// NOTE(review): Nx/Ny/Nh are set but unused here -- presumably kept by the
// generator for uniformity across generated variants; confirm before removing.
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    int Nx = 28;
    int Ny = 28;
    int Nh = 1;
    // Row strides (in elements/2) of the 6 spatial positions a 6x2v kernel
    // touches; temporarily patched below when a tile crosses a row boundary.
    long long Astrides[6] = {0,2,4,6,8,10};
    int b1 = 0;
    // Phase 1: pack oriB -> B.  Thread partitioning uses tid%1 / tid/1
    // (generated for a 1-thread-per-dimension split).
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    // All packing must finish before any thread consumes B.
#pragma omp barrier
// begin push button generated block
for(int c5=0;c5<64+0;c5+=64) {
for(int f5=0;f5<128+0;f5+=128) {
for(int xy5=0;xy5<784+0;xy5+=784) {
for(int c4=c5;c4<min(64, 64+c5);c4+=64) {
for(int xy4=xy5;xy4<min(784, 784+xy5);xy4+=784) {
for(int f4=f5;f4<min(128, 128+f5);f4+=128) {
for(int c3=c4;c3<min(64, 64+c4);c3+=Tc1) {
for(int f3=f4;f3<min(128, 128+f4);f3+=Tf2) {
for(int xy3=xy4;xy3<min(784, 784+xy4);xy3+=Txy3) {
for(int xy2=xy3;xy2<min(784, Txy3+xy3);xy2+=6) {
for(int f2=f3;f2<min(128, Tf2+f3);f2+=16) {
for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1) {
for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1) {
for(int xy1=xy2;xy1<min(784, 6+xy2);xy1+=6) {
for(int f1=f2;f1<min(128, 16+f2);f1+=16) {
// Decompose the flat xy index into (row, col) and compute the base offsets
// into A (stride-2 input), B (packed weights) and C (output) for this tile.
int ctile=min(Tc1, 64-c1);
int x1=xy1/28;
int y1=xy1%28/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*200704+c1_1*3136+2*x1*56+2*y1*1+c1_2*1;
int offsetB=0+kf1_1*1024+c1*16+0*16+0*16+kf1_2*1;
int offsetC=0+b1*100352+of1_1*784+x1*28+y1*1+of1_2*1;
if(28-y1>=6){
// Full 6-wide tile inside one image row: fast path.
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(28*28-xy1>=6){
// Tile wraps to the next row: bump the strides of the wrapped lanes by one
// input row (56 elements), run the kernel, then restore the strides.
for(int sti=28-y1;sti<6;sti+=1) {
Astrides[sti]+=56;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=28-y1;sti<6;sti+=1) {
Astrides[sti]-=56;
}
}
else{
// Fewer than 6 positions remain in the whole image: use the 4-wide kernel.
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}
gt.region.c
/*
 * PROJECT: GEM-Tools library
 * FILE: gt.region.c
 * DATE: 10/07/2013
 * AUTHOR(S): Thasso Griebel <thasso.griebel@gmail.com>
 * DESCRIPTION: Filter mapped templates by a reference annotation, printing
 *              templates whose hits fall in a requested gene region.
 */

#include <getopt.h>
#include <omp.h>

#include "gem_tools.h"

/* Command-line parameters for the tool (filled in by parse_arguments). */
typedef struct {
  char *input_file;     /* input map file; NULL => read stdin */
  char *output_file;    /* output file; NULL => write stdout */
  char *annotation;     /* GTF annotation file (mandatory) */
  char *gene_id;        /* gene id to filter on; NULL => no gene filter */
  bool paired;          /* treat input as paired-end */
  uint64_t num_threads; /* worker thread count */
} gt_region_args;

gt_region_args parameters = {
    .input_file=NULL,
    .output_file=NULL,
    .annotation=NULL,
    .gene_id=NULL,
    .paired=false,
    .num_threads=1
};

/*
 * Stream templates from the input, look each one up in the GTF index, and
 * print (to stdout) every template with a hit whose gene_id matches
 * parameters.gene_id.  Runs parameters.num_threads OpenMP workers, each with
 * its own hit vector and output attributes.
 */
GT_INLINE void gt_region_read(gt_gtf* const gtf) {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.input_file,false);
  gt_output_file* output_file = (parameters.output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.output_file,SORTED_FILE);
  // Parallel I/O
  #pragma omp parallel num_threads(parameters.num_threads)
  {
    gt_vector* hits = gt_vector_new(16, sizeof(gt_gtf_entry*));
    gt_output_map_attributes* const output_map_attributes = gt_output_map_attributes_new();
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired,buffered_output,template){
      gt_vector_clear(hits);
      gt_gtf_search_template(gtf, hits, template);
      GT_VECTOR_ITERATE(hits, v, c, gt_gtf_entry*){
        gt_gtf_entry* e = *v;
        if(parameters.gene_id != NULL && e->gene_id != NULL){
          if(strcmp(e->gene_id->buffer, parameters.gene_id) == 0){
            // NOTE(review): prints directly to stdout rather than through the
            // per-thread buffered_output (commented-out call below), so lines
            // from different threads may interleave -- confirm intended.
            //gt_output_map_bofprint_gem_template(buffered_output, template, output_map_attributes);
            gt_output_map_fprint_gem_template(stdout, template, output_map_attributes);
            break;
          }
        }
      }
    }GT_END_READING_WRITING_LOOP(input_file,output_file,template);
    // cleanup per threads
    gt_vector_delete(hits);
    gt_output_map_attributes_delete(output_map_attributes);
  }
  // Clean global
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}

/*
 * Parse command-line options into the global `parameters`.
 * Exits with an error if no annotation file was given.
 */
void parse_arguments(int argc,char** argv) {
  struct option* gt_region_getopt = gt_options_adaptor_getopt(gt_region_options);
  gt_string* const gt_region_short_getopt = gt_options_adaptor_getopt_short(gt_region_options);
  int option, option_index;
  while (true) {
    // Get option & Select case
    if ((option=getopt_long(argc,argv,
        gt_string_get_string(gt_region_short_getopt),gt_region_getopt,&option_index))==-1) break;
    switch (option) {
      /* I/O */
      case 'i': parameters.input_file = optarg; break;
      case 'o': parameters.output_file = optarg; break;
      case 'a': parameters.annotation = optarg; break;
      case 'p': parameters.paired = true; break;
      /* Misc */
      case 'g': parameters.gene_id = optarg; break;
      case 't': parameters.num_threads = atol(optarg); break;
      case 'h':
        /* NOTE(review): usage text still says "gt.gtfcount"; this tool is
         * gt.region -- confirm and update the message.  No break needed:
         * exit() does not return. */
        fprintf(stderr, "USE: gt.gtfcount [OPERATION] [ARGS]...\n");
        gt_options_fprint_menu(stderr,gt_region_options,gt_region_groups,false,false);
        exit(1);
      case 'J':
        gt_options_fprint_json_menu(stderr,gt_region_options,gt_region_groups,true,false);
        exit(1);
        break;
      case '?':
      default:
        gt_fatal_error_msg("Option not recognized");
    }
  }
  // Check parameters
  if (parameters.annotation==NULL) {
    gt_fatal_error_msg("Please specify a reference annotation");
  }
  // Free
  gt_string_delete(gt_region_short_getopt);
}

/* Entry point: install signal handlers, parse args, load the GTF, run. */
int main(int argc,char** argv) {
  // GT error handler
  gt_handle_error_signals();
  parse_arguments(argc,argv);
  // read gtf file
  gt_gtf* const gtf = gt_gtf_read_from_file(parameters.annotation, parameters.num_threads);
  gt_region_read(gtf);
  return 0;
}
target-16.c
/* OpenMP `target` testsuite case: data-sharing of a VLA on a target region.
   Both functions abort() on failure; main exercises them with n == 7. */

extern void abort (void);

/* VLA `a` is initialized on the host and mapped firstprivate into the target
   region; the device copy must observe the host values. */
void
foo (int n)
{
  int a[n], i, err;
  for (i = 0; i < n; i++)
    a[i] = 7 * i;
  #pragma omp target firstprivate (a) map(from:err) private (i)
  {
    err = 0;
    for (i = 0; i < n; i++)
      if (a[i] != 7 * i)
        err = 1;
  }
  if (err)
    abort ();
}

/* VLA `a` is private to the target region: written and re-checked entirely
   on the device, with nested parallel-for worksharing inside the region. */
void
bar (int n)
{
  int a[n], i, err;
  #pragma omp target private (a) map(from:err)
  {
    #pragma omp parallel for
    for (i = 0; i < n; i++)
      a[i] = 7 * i;
    err = 0;
    #pragma omp parallel for reduction(|:err)
    for (i = 0; i < n; i++)
      if (a[i] != 7 * i)
        err |= 1;
  }
  if (err)
    abort ();
}

int
main ()
{
  foo (7);
  bar (7);
  return 0;
}
GB_unaryop__minv_uint32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_uint64
// op(A') function:  GB_tran__minv_uint32_uint64

// C type:   uint32_t
// A type:   uint64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: 32-bit unsigned integer "multiplicative inverse"
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting: typecast the uint64_t input down to the uint32_t output type
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of A; embarrassingly parallel, so a
// static OpenMP schedule over nthreads threads is used.
GrB_Info GB_unop__minv_uint32_uint64
(
    uint32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c and is
// specialized here via the GB_* macros defined above.
GrB_Info GB_tran__minv_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB098-simd2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the following
   additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

/* Two-dimension array computation with a vectorization directive;
collapse(2) makes simd associate with 2 loops. Loop iteration variables
should be predetermined as lastprivate.
(DataRaceBench DRB098: race-free; Cetus-annotated parallel loop nest --
the pragma placement is part of the benchmark and must stay as-is.) */
int main()
{
	int len = 100;
	double a[len][len], b[len][len], c[len][len];
	int i, j;
	int _ret_val_0;
	/* Initialization: each (i,j) element is written exactly once. */
	#pragma cetus private(i, j)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#0#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j ++ )
		{
			a[i][j]=(((double)i)/2.0);
			b[i][j]=(((double)i)/3.0);
			c[i][j]=(((double)i)/7.0);
		}
	}
	/* Element-wise product: no loop-carried dependences, race free. */
	#pragma cetus private(i, j)
	#pragma loop name main#1
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#1#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j ++ )
		{
			c[i][j]=(a[i][j]*b[i][j]);
		}
	}
	printf("c[50][50]=%f\n", c[50][50]);
	_ret_val_0=0;
	return _ret_val_0;
}
DRB054-inneronly2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

/*
 * Example with a loop-carried data dependence at the outer-level loop
 * (b[i][j] reads b[i-1][j-1]); only the inner level loop can be parallelized.
 *
 * Fixes vs. the original:
 *  - #include <stdio.h>: printf was called without a declaration (implicit
 *    function declaration, invalid since C99).
 *  - print with %f: passing a double for %d is undefined behavior per
 *    C11 7.21.6.1p9 and printed garbage.
 * The OpenMP structure (which loops are parallelized, and how) is the point
 * of the benchmark and is left untouched.
 */
int main()
{
  int i, j;
  int n = 100, m = 100;
  double b[n][m];

  /* Initialization: every element written independently -- race free. */
  #pragma omp parallel for private(i, j)
  for (i = 0; i < n; i++)
    #pragma omp parallel for private(j)
    for (j = 0; j < n; j++)
      b[i][j] = (double)(i * j);

  /* The outer i-loop carries the dependence b[i][j] <- b[i-1][j-1];
     parallelizing only the inner j-loop keeps it race free. */
  for (i = 1; i < n; i++)
    #pragma omp parallel for private(j)
    for (j = 1; j < m; j++)
      b[i][j] = b[i - 1][j - 1];

  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++)
      printf("%f\n", b[i][j]);

  return 0;
}
parallel_push_relabel.h
//
// Created by Jan Groschaft on 12/11/18.
//

/*
 * Implementation of Goldberg-Tarjan's parallel push-relabel algorithm. Description can be found in
 * Goldberg, Andrew and Tarjan, Robert, A New Approach to the Maximum-Flow Problem, J. ACM, 1988.
 *
 * This implementation is also based on detailed pseudocode presented in
 * Baumstark, Niklas, Speeding up Maximum Flow Computations on Shared-Memory Platforms, KIT, Karlsruhe, 2014.
 */

#ifndef MAXFLOW_PARALLEL_PUSH_RELABEL_H
#define MAXFLOW_PARALLEL_PUSH_RELABEL_H

#include <memory>
#include <chrono>
#include <iostream>
#include <atomic>
#include <omp.h>
#include <algorithm>
#include "../../common_types.h"
#include "../../data_structures/queue.h"
#include "../../data_structures/thread_local_buffer_pool.h"
// NOTE(review): global_relabel() uses assert() but <cassert> is not included
// directly here — presumably pulled in transitively; confirm.

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

namespace parallel_push_relabel {

// T: vertex-index type, U: capacity/flow type, vector: container template
// (must provide size() and operator[], as used below).
template <template <class> typename vector, typename T, typename U>
class max_flow_instance {
    // Per-vertex state, padded to a full cache line (alignas) so that
    // threads updating adjacent vertices do not false-share.
    struct alignas (CACHE_LINE_SIZE) vertex {
        U excess { 0 };                  // excess committed at the end of the previous phase
        std::atomic<U> new_excess { 0 }; // excess pushed to this vertex during the current phase
        T label;                         // current distance label
        T new_label;                     // label computed in stage 2, applied in stage 3
        std::atomic_flag discovered = ATOMIC_FLAG_INIT; // set once when queued; prevents duplicate enqueues
    };

    vector<vector<cached_edge<T, U>>> _residual_network; // adjacency lists of residual edges
    std::unique_ptr<vertex[]> _vertices;
    std::unique_ptr<T[]> _active { };                    // active-vertex queue for the current phase
    data_structures::thread_local_buffer_pool<T> _pool;  // per-thread buffers collecting next phase's active set
    T _source, _sink, _relabel_threshold, _active_cnt;
    std::size_t _relabel_progress;                       // relabel-work counter driving periodic global relabels
    const T _thread_count;

public:
    // Takes ownership of the residual network; retrieve it afterwards with
    // steal_network(). Sets the OpenMP thread count for all later phases.
    max_flow_instance ( vector<vector<cached_edge<T, U>>> graph, T source, T sink,
                        std::size_t thread_count = static_cast<size_t>(omp_get_max_threads ()) )
            : _residual_network ( std::move ( graph ) ),
              _vertices ( std::make_unique<vertex[]> ( _residual_network.size () ) ),
              _active ( std::make_unique<T[]> ( _residual_network.size () ) ),
              _pool ( data_structures::thread_local_buffer_pool<T> ( thread_count, _residual_network.size () ) ),
              _source ( source ), _sink ( sink ), _active_cnt ( 0 ), _relabel_progress ( 0 ),
              _thread_count ( thread_count ) {
        omp_set_num_threads ( static_cast<int> ( _thread_count ) );
        init ();
    }

    // Instrumentation counters (public; reported under DEBUG).
    uint64_t _phase_cnt = 0;
    uint64_t _push_cnt = 0;
    uint64_t _global_update_cnt = 0;

    // Runs the push/relabel phases to completion and returns the value of
    // the maximum flow (total excess accumulated at the sink).
    U find_max_flow ( ) noexcept {
        find_max_flow_inner ();
#ifdef DEBUG
        std::cout << "global updates:\t" << _global_update_cnt << std::endl;
        std::cout << "phase cnt: " << _phase_cnt << std::endl;
        std::cout << "pushes: " << _push_cnt << std::endl;
#endif
        return _vertices[_sink].new_excess + _vertices[_sink].excess;
    }

    // Converts the preflow into a proper flow by draining leftover excess
    // back towards the source (run with source/sink roles swapped).
    void preflow_to_flow ( ) {
        std::swap ( _source, _sink );
        find_max_flow_inner ();
        std::swap ( _source, _sink );
#ifdef DEBUG
        for ( std::size_t i = 0; i < _residual_network.size(); ++i )
            if ( i != _source && i != _sink )
                if ( _vertices[i].excess > 0 )
                    std::cerr << "Excess violation: vertex " << i << ", excess " << _vertices[i].excess << '\n';
#endif
    }

    // Moves the residual network out of this instance.
    auto steal_network ( ) {
        return std::move ( _residual_network );
    }

private:
    // Heuristic constants for scheduling global relabels (see Baumstark's thesis).
    static constexpr T ALPHA = 6, BETA = 12;
    static constexpr double GLOBAL_RELABEL_FREQ = 0.5;

    // Saturates all source-adjacent edges to create the initial preflow and
    // computes _relabel_threshold = |V|*ALPHA + |E|/2 (m counts directed
    // residual entries, hence the halving).
    void init ( ) noexcept {
#pragma omp parallel for schedule(static)
        for ( std::size_t i = 0; i < _residual_network[_source].size (); ++i ) {
            auto & edge = _residual_network[_source][i];
            _vertices[edge.dst_vertex].excess = edge.r_capacity;
            edge.reverse_r_capacity += edge.r_capacity;
            _residual_network[edge.dst_vertex][edge.reverse_edge_index].r_capacity += edge.r_capacity;
            _residual_network[edge.dst_vertex][edge.reverse_edge_index].reverse_r_capacity -= edge.r_capacity;
            edge.r_capacity = 0;
        }
        T m = 0;
        for ( std::size_t i = 0; i < _residual_network.size (); ++i )
            m += _residual_network[i].size ();
        _relabel_threshold = _residual_network.size () * ALPHA + m / 2;
    }

    // Main loop: repeats 4-stage phases (push, relabel, apply labels,
    // commit excesses) until no vertex remains active. Barriers between the
    // omp-for stages separate reads of old state from writes of new state.
    void find_max_flow_inner ( ) {
        global_relabel ();
        for ( ;; ) {
            if ( _active_cnt == 0 )
                return;
            ++_phase_cnt;
            uint64_t push_cnt_per_phase = 0;
#pragma omp parallel
            {
                // stage 1: push excess along admissible edges (label == neighbour+1)
#pragma omp for schedule(static) reduction(+:push_cnt_per_phase)
                for ( T i = 0; i < _active_cnt; ++i ) {
                    auto thr_id = omp_get_thread_num ();
                    auto vertex = _active[i];
                    // label == |V| means the vertex is disconnected from the sink
                    if ( _vertices[vertex].label == _residual_network.size () )
                        continue;
                    push ( vertex, _vertices[vertex].label, thr_id, push_cnt_per_phase );
                }
                //stage 2
                // NOTE(review): reduction on the member _relabel_progress —
                // OpenMP reductions normally require a plain variable; confirm
                // this compiles as intended on all target compilers.
#pragma omp for schedule(static) reduction(+:_relabel_progress)
                for ( T i = 0; i < _active_cnt; ++i ) {
                    auto thr_id = omp_get_thread_num ();
                    auto vertex = _active[i];
                    relabel ( vertex, thr_id, _relabel_progress );
                }
                //stage 3: publish the new labels and reset discovered flags
#pragma omp for schedule(static)
                for ( T i = 0; i < _active_cnt; ++i ) {
                    auto vertex = _active[i];
                    _vertices[vertex].label = _vertices[vertex].new_label;
                    _vertices[vertex].discovered.clear ( std::memory_order_relaxed );
                }
                //stage 4: swap in the next active set, commit pushed excess
#pragma omp single
                _active_cnt = _pool.swap_data ( _active );
#pragma omp for schedule(static)
                for ( T i = 0; i < _active_cnt; ++i ) {
                    auto vertex = _active[i];
                    _vertices[vertex].excess += _vertices[vertex].new_excess.load ( std::memory_order_relaxed );
                    _vertices[vertex].new_excess.store ( 0, std::memory_order_relaxed );
                    _vertices[vertex].discovered.clear ( std::memory_order_relaxed );
                }
            }
            // Periodic global relabel: either enough relabel work accumulated,
            // or the phase made no pushes at all.
            if ( _relabel_progress * GLOBAL_RELABEL_FREQ >= _relabel_threshold || push_cnt_per_phase == 0 ) {
                _relabel_progress = 0;
                global_relabel ();
            }
            _push_cnt += push_cnt_per_phase;
        }
    }

    // Pushes as much of `vertex`'s excess as possible along admissible edges
    // (neighbour label == label-1). Newly receiving vertices are enqueued for
    // the next phase exactly once via the discovered flag; excess arrives in
    // new_excess (atomic fetch_add) and is committed in stage 4.
    inline void push ( const T vertex, const T label, int thr_id, uint64_t & push_cnt ) noexcept {
        const auto target_label = label - 1;
        for ( auto & edge : _residual_network[vertex] ) {
            if ( edge.r_capacity > 0 && _vertices[edge.dst_vertex].label == target_label ) {
                auto flow = std::min ( _vertices[vertex].excess, edge.r_capacity );
                // source/sink are never made active
                if ( edge.dst_vertex != _source && edge.dst_vertex != _sink )
                    if ( !_vertices[edge.dst_vertex].discovered.test_and_set ( std::memory_order_relaxed ) )
                        _pool.push_back ( edge.dst_vertex, static_cast<size_t>(thr_id) );
                ++push_cnt;
                _vertices[vertex].excess -= flow;
                _vertices[edge.dst_vertex].new_excess.fetch_add ( flow, std::memory_order_relaxed );
                // update both directions plus the cached reverse capacities
                edge.r_capacity -= flow;
                edge.reverse_r_capacity += flow;
                _residual_network[edge.dst_vertex][edge.reverse_edge_index].reverse_r_capacity -= flow;
                _residual_network[edge.dst_vertex][edge.reverse_edge_index].r_capacity += flow;
                if ( _vertices[vertex].excess == 0 )
                    return;
            }
        }
    }

    // Computes the vertex's new label for stage 3. A vertex that still has
    // excess (or is already disconnected) is relabelled; if the new label is
    // |V| it is disconnected — its pending new_excess is folded back into
    // excess and it is not re-enqueued. Otherwise it stays active.
    inline void relabel ( const T vertex, const int thr_id, std::size_t & relabel_progress ) noexcept {
        if ( _vertices[vertex].excess > 0 || _vertices[vertex].label == _residual_network.size () ) {
            relabel_progress += BETA;
            _vertices[vertex].new_label = calculate_new_label ( vertex );
            relabel_progress += _residual_network[vertex].size ();
            if ( _vertices[vertex].new_label == _residual_network.size () ) {
                _vertices[vertex].excess += _vertices[vertex].new_excess;
                _vertices[vertex].new_excess = 0;
                return;
            }
            if ( !_vertices[vertex].discovered.test_and_set ( std::memory_order_relaxed ) )
                _pool.push_back ( vertex, static_cast<size_t>(thr_id) );
        }
        else
            _vertices[vertex].new_label = _vertices[vertex].label;
    }

    // Minimum neighbour label over residual edges, plus one; |V| (i.e.
    // (|V|-1)+1) if no residual edge exists.
    inline T calculate_new_label ( const T vertex ) noexcept {
        T increase_to = _residual_network.size () - 1;
        for ( auto & edge : _residual_network[vertex] ) {
            if ( edge.r_capacity == 0 )
                continue;
            increase_to = std::min ( increase_to, _vertices[edge.dst_vertex].label );
        }
        return increase_to + 1;
    }

    // Recomputes exact distance labels with a parallel reverse BFS from the
    // sink (following edges with residual capacity towards the sink), then
    // rebuilds the active set from vertices that are reachable and still
    // carry excess.
    void global_relabel ( ) noexcept {
        ++_global_update_cnt;
        const auto not_reached = _residual_network.size ();
#pragma omp parallel for schedule(static)
        for ( std::size_t i = 0; i < _residual_network.size (); ++i )
            _vertices[i].label = not_reached;
        _vertices[_sink].label = 0;
        _vertices[_sink].discovered.test_and_set ();
        assert ( _pool.empty () );
        // BFS frontier lives in _active; _pool gathers the next frontier.
        _active[0] = _sink;
        std::size_t current_queue_size = 1;
        T current_distance = 0;
        while ( current_queue_size > 0 ) {
#pragma omp parallel for schedule(static)
            for ( std::size_t i = 0; i < current_queue_size; ++i ) {
                auto thr_id = omp_get_thread_num ();
                auto current_vertex = _active[i];
                for ( auto edge : _residual_network[current_vertex] ) {
                    // reverse_r_capacity > 0 means the neighbour can send flow here
                    if ( edge.reverse_r_capacity > 0 ) {
                        if ( !_vertices[edge.dst_vertex].discovered.test_and_set ( std::memory_order_relaxed ) ) {
                            _vertices[edge.dst_vertex].label = current_distance + 1;
                            _pool.push_back ( edge.dst_vertex, static_cast<std::size_t>(thr_id) );
                        }
                    }
                }
            }
            current_queue_size = _pool.swap_data ( _active );
            ++current_distance;
        }
        // Rebuild the active set: reachable vertices with positive excess.
#pragma omp parallel for schedule(static)
        for ( std::size_t i = 0; i < _residual_network.size (); ++i ) {
            auto thr_id = omp_get_thread_num ();
            if ( _vertices[i].label != not_reached && _vertices[i].excess > 0 && i != _sink )
                _pool.push_back ( i, static_cast<size_t>(thr_id) );
            _vertices[i].discovered.clear ( std::memory_order_relaxed );
        }
        _active_cnt = _pool.swap_data ( _active );
    }
};

}

#endif //MAXFLOW_PARALLEL_PUSH_RELABEL_H
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache);
} // namespace threadSafety

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID, kept in front of the
  /// DenseMap because consecutive lookups tend to hit the same file.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Returns a mutable reference to the nullability record for \p file,
  /// default-constructing it on first access (DenseMap::operator[]).
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    // (The cached record may have been mutated through the returned
    // reference, so it must be written back before being replaced.)
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// when \p Tok is not the location the type was recorded for. A stored
  /// concrete Type wins over the lazy ComputeType callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields (e.g. an unevaluated operand such as an MS-style
/// inline-assembly SIZE operand; see UnevaluatedAbstract below).
/// NOTE(review): the upstream comment was truncated mid-sentence
/// ("This is really a"); intent inferred from UnevaluatedAbstract -- confirm.
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // Maybe-ODR-used expressions carried over from the enclosing context
  // (presumably restored when this record is popped -- confirm in
  // PopExpressionEvaluationContext).
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  // Expressions recorded as possible dereferences of 'noderef' pointers;
  // consumed by WarnOnPendingNoDerefs() below.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// FoldingSet node wrapping a SpecialMemberOverloadResult so results can be
/// cached in SpecialMemberCache below.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

// Arena allocator for miscellaneous Sema-owned objects.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. 
enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. 
class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. 
class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. 
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
                           SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
                            SourceLocation Loc);

// Compute the full type (with source info) denoted by a declarator; the
// *Cast variant is used when the declarator appears in a cast to \p FromTy.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads normalize each diagnostic argument into a type
// DiagnosticBuilder's operator<< accepts (e.g. Expr* / TypeLoc become a
// SourceRange).
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that emits a fixed diagnostic ID with a bound tuple of
/// extra arguments, followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

/// State for one level of nesting while parsing modules.
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path: the declaration itself is visible; otherwise walk
  // redeclarations (and optionally collect the owning modules).
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
/// Convenience overload discarding the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// Diagnostic-free completeness query: succeeds exactly when
/// RequireCompleteType would, but emits nothing.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}

/// Ensure that \p T is a complete type at \p Loc, diagnosing if it is not.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Variadic convenience form: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

/// Ensure that \p T is a literal type, diagnosing if it is not.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Out-parameter bundle telling the parser whether (and how) a redundant
/// body may be skipped; Previous/New record the declarations involved.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                             Scope *S, CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// Discriminated result of ClassifyName(): Kind selects which of the payload
/// members (Expr, Template, Type) is meaningful.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the classification kind to the TemplateNameKind used elsewhere in
  /// template handling; only valid for the template classifications.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
/// \param Dependent Set to true when the plausibility rests on a
/// dependent-scope reference (so the answer may change at instantiation).
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible only if no explicit template
  // arguments were already written.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(
    unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc,
    SourceLocation ConstQualLoc = SourceLocation(),
    SourceLocation VolatileQualLoc = SourceLocation(),
    SourceLocation RestrictQualLoc = SourceLocation(),
    SourceLocation AtomicQualLoc = SourceLocation(),
    SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                TypedefNameDecl *D, LookupResult &Previous,
                                bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                              LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); }

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
/// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. 
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc,
                           ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                  bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag: 'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration: 'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                             const CXXScopeSpec &SS, IdentifierInfo *Name,
                             SourceLocation TagLoc, SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle, AccessSpecifier AS);

MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo, RecordDecl *Record,
                          SourceLocation Loc, bool Mutable,
                          Expr *BitfieldWidth, InClassInitStyle InitStyle,
                          SourceLocation TSSL, AccessSpecifier AS,
                          NamedDecl *PrevDecl, Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,

  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); 
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType 
ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). 
virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. 
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Wrapper around a fully-formed expression that has been through
  /// ActOnFinishFullExpr; created only via Sema::MakeFullExpr and friends.
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult 
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, 
SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin delaying diagnostics for a declaration; diagnostics emitted while
/// the state is active accumulate in \p pool. Delegates to
/// DelayedDiagnostics.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;

/// Suspend delayed-diagnostic processing while parsing a class; paired with
/// PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult 
BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. 
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// default constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. 
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. 
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with an ExprWithCleanups
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg 
TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. 
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( 
ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. 
UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce template arguments for a class template partial specialization
/// from the given list of template arguments, reporting details of any
/// failure via \p Info.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a variable template partial specialization
/// from the given list of template arguments, reporting details of any
/// failure via \p Info.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Substitute the explicitly-provided template arguments into the
/// function template's parameter types, seeding \p Deduced and producing
/// the substituted parameter types in \p ParamTypes.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. 
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;                // Error count at construction time.
  bool PrevInNonInstantiationSFINAEContext; // Saved flags, restored in dtor.
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  // Saves the current SFINAE-related state and, unless we are already in a
  // SFINAE context, marks this scope as a non-instantiation SFINAE context.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  // Restores every piece of state saved by the constructor.
  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection; // Saved so the destructor can restore it.

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

// RAII scope that (when Enabled) takes ownership of the global queues of
// pending instantiations and vtable uses, so that only work queued inside
// the scope is performed by perform(); the previous queues are restored on
// destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  // Define the vtables used and perform the instantiations queued since the
  // scope was opened. Both queues must be drained before destruction.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

// RAII scope that isolates the queue of local implicit instantiations, so
// that only instantiations queued inside the scope are run by perform();
// the previous queue is restored on destruction.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false; // True once any non-default info has been set.

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in increasing order (enforced by the assert).
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

// Substitute TemplateArgs into a type, preserving source information.
TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

// Substitute into a function declaration's type; ThisContext/ThisTypeQuals
// supply the object type for a member function's 'this'.
TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    Qualifiers ThisTypeQuals);

void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);

ParmVarDecl *
SubstParmVarDecl(ParmVarDecl *D,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 int indexAdjustment, Optional<unsigned> NumExpansions,
                 bool ExpectParameterPack);

bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);

ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

// Substitute into an initializer; CXXDirectInit distinguishes
// direct-initialization syntax.
ExprResult
SubstInitializer(Expr *E,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 bool CXXDirectInit);

bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                         CXXRecordDecl *Pattern,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

// Instantiate the definition of a class/enum/in-class initializer from its
// template pattern.
bool InstantiateClass(SourceLocation PointOfInstantiation,
                      CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      TemplateSpecializationKind TSK, bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

// An attribute whose instantiation is deferred, together with the scope and
// the declaration it will be attached to.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

bool InstantiateClassTemplateSpecialization(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK, bool Complain = true);

void InstantiateClassMembers(
    SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation,
    const MultiLevelTemplateArgumentList &TemplateArgs,
    TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK);

NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(
    NestedNameSpecifierLoc NNS,
    const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo SubstDeclarationNameInfo(
    const DeclarationNameInfo &NameInfo,
    const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateName SubstTemplateName(
    NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
    SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs);

bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);

FunctionDecl *InstantiateFunctionDeclaration(
    FunctionTemplateDecl *FTD, const TemplateArgumentList *Args,
    SourceLocation Loc);

void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);

VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void BuildVariableInstantiation(
    VarDecl *NewVar, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs,
    LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner,
    LocalInstantiationScope *StartingScope,
    bool InstantiatingVarTemplate = false,
    VarTemplateSpecializationDecl *PrevVTSD = nullptr);

void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

void InstantiateMemInitializers(
    CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl,
    const MultiLevelTemplateArgumentList &TemplateArgs);

// Map a declaration (or context) from a template pattern to its
// instantiated counterpart.
NamedDecl *
FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     bool FindingInstantiatedContext = false);

DeclContext *
FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                        const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};

// Kind of the Objective-C container currently being processed, if any.
ObjCContainerKind getObjCContainerKind() const;

// Act on a single Objective-C type parameter (for parameterized classes).
DeclResult actOnObjCTypeParam(Scope *S,
                              ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc,
                              unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc,
                              ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S,
                                          SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(Scope *S,
                                     SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(
    SourceLocation AtCompatibilityAliasLoc,
    IdentifierInfo *AliasName, SourceLocation AliasLocation,
    IdentifierInfo *ClassName, SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList,
    SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
    unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
       Scope *S,
       ParsedType baseType,
       SourceLocation lAngleLoc,
       ArrayRef<IdentifierInfo *> identifiers,
       ArrayRef<SourceLocation> identifierLocs,
       SourceLocation rAngleLoc,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SourceLocation &protocolRAngleLoc,
       bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
             SourceLocation lAngleLoc,
             ArrayRef<Decl *> protocols,
             ArrayRef<SourceLocation> protocolLocs,
             SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
             Scope *S,
             SourceLocation Loc,
             ParsedType BaseType,
             SourceLocation TypeArgsLAngleLoc,
             ArrayRef<ParsedType> TypeArgs,
             SourceLocation TypeArgsRAngleLoc,
             SourceLocation ProtocolLAngleLoc,
             ArrayRef<Decl *> Protocols,
             ArrayRef<SourceLocation> ProtocolLocs,
             SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
                             SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

// Families of Objective-C methods with special semantics (alloc/new/copy/
// init variants). NOTE(review): exact use sites are outside this view.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

// Parser-produced description of one argument of an Objective-C method.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc,   // location of the + or -.
    SourceLocation EndLoc,     // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr,
                                     SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType,
                                     bool Super);

ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S,
                                   IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel,
                             ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc,
                                     Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S,
                             ParsedType Receiver,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver,
                                QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc,
                                        Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S,
                                Expr *Receiver,
                                Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo,
                                Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S,
                                SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type,
                                SourceLocation RParenLoc,
                                Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                   CastKind &Kind);

bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
                                      QualType DestType, QualType SrcType,
                                      ObjCInterfaceDecl *&RelatedClass,
                                      ObjCMethodDecl *&ClassMethod,
                                      ObjCMethodDecl *&InstanceMethod,
                                      TypedefNameDecl *&TDNDecl,
                                      bool CfToNs, bool Diagnose = true);

bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
                                       QualType DestType, QualType SrcType,
                                       Expr *&SrcExpr,
                                       bool Diagnose = true);

bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
                                        bool Diagnose = true);

bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                             PragmaClangSectionAction Action,
                             PragmaClangSectionKind SecKind,
                             StringRef SecName);

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                     StringRef SlotLabel, Expr *Alignment);

enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};

void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                  SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
                          StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
                          int SectionFlags, StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                       Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

/// AddParameterABIAttr - Adds a parameter ABI attribute to a particular
/// declaration.
void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

enum class RetainOwnershipKind {NS, CF, OS};

/// AddXConsumedAttr - Adds a consumed-parameter attribute of the given
/// retain-ownership kind (NS, CF, or OS) to a particular declaration.
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                                    Expr *Max, unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                             Expr *Max, unsigned SpellingListIndex);

bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                             StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                    bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                             bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                         SourceLocation FuncLoc);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
/// The OpenCL extension currently in effect, as set by
/// setCurrentOpenCLExtension.
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}

/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

/// Record \p Ext as the OpenCL extension currently in effect.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

/// Return true if declaration \p FD is disabled because a required
/// OpenCL extension is not enabled.
bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. 
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();

/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                  const DeclarationNameInfo &Id,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  NamedDeclSetType &SameDirectiveDecls);

/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());

/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}

/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
  return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
         isInOpenMPTargetExecutionDirective();
}

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. 
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);

/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);

/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);

/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);

/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. 
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. 
// The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
/// Abstract callback used to customize the diagnostics emitted when an
/// expression fails to be an integer constant expression (ICE).
class VerifyICEDiagnoser {
public:
  bool Suppress; // When true, suppress all diagnostics from this verifier.

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Called when the expression is not an ICE at all.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Called when the expression is not a formal ICE but could be
  /// constant-folded to a value.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
  // Nesting depth of '#pragma force_cuda_host_device'; nonzero means all
  // functions are treated as __host__ __device__.
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  // Streams Value into either the immediate diagnostic or the deferred
  // partial diagnostic, whichever this builder created.
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;    // Location the diagnostic points at.
  unsigned DiagID;       // Diagnostic ID to emit.
  FunctionDecl *Fn;      // Function the (possibly deferred) diag is attached to.
  bool ShowCallStack;    // Whether to also emit the known-emitted call stack.

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. 
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; }

/// Bumps the MSVC mangling number of the current scope (used to give
/// distinct mangled names to otherwise-identical local entities).
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

/// Returns the lexical context being instantiated from, if any, otherwise
/// the current semantic context.
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

/// Like getCurLexicalContext(), but maps an Objective-C category to its
/// class interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
  /// RAII object that stashes the pending delayed-parsed-class state
  /// (exception-spec checks and DLL export classes) on construction and
  /// restores it on destruction, asserting that no new pending work was
  /// left behind in between.
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    // Exchanges the saved copies with Sema's live pending-state vectors;
    // called symmetrically from the constructor and destructor.
    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;             // The expression designating the member access.
    RecordDecl *RD;      // The packed record containing the member.
    ValueDecl *MD;       // The misaligned member itself.
    CharUnits Alignment; // The (reduced) alignment of the member.

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    // Equality is by designating expression only.
    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. 
Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
GB_unaryop__identity_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint32_uint32
// op(A') function:  GB_tran__identity_uint32_uint32

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input)
#define GB_OP(z, x) \
    z = x ;

// casting (here a no-op cast, uint32_t to uint32_t)
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator elementwise over the anz entries of Ax,
// writing to Cx, parallelized over nthreads OpenMP threads.
GrB_Info GB_unop__identity_uint32_uint32
(
    uint32_t *restrict Cx,          // output array, anz entries
    const uint32_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which is
// specialized via the macros defined above (phase 2 of 2 of the transpose).
GrB_Info GB_tran__identity_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // # of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
core_zgeqrt.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_geqrt
 *
 *  Computes a QR factorization of an m-by-n tile A:
 *  The factorization has the form
 *    \f[
 *        A = Q \times R
 *    \f]
 *  The tile Q is represented as a product of elementary reflectors
 *    \f[
 *        Q = H(1) H(2) ... H(k),
 *    \f]
 *  where \f$ k = min(m,n) \f$.
 *
 *  Each \f$ H(i) \f$ has the form
 *    \f[
 *        H(i) = I - \tau \times v \times v^H
 *    \f]
 *  where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with
 *  v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
 *  and \f$ tau \f$ in tau(i).
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the tile A.  m >= 0.
 *
 * @param[in] n
 *          The number of columns of the tile A.  n >= 0.
 *
 * @param[in] ib
 *          The inner-blocking size.  ib >= 0.
 *
 * @param[in,out] A
 *          On entry, the m-by-n tile A.
 *          On exit, the elements on and above the diagonal of the array
 *          contain the min(m,n)-by-n upper trapezoidal tile R (R is
 *          upper triangular if m >= n); the elements below the diagonal,
 *          with the array tau, represent the unitary tile Q as a
 *          product of elementary reflectors (see Further Details).
 *
 * @param[in] lda
 *          The leading dimension of the array A.  lda >= max(1,m).
 *
 * @param[out] T
 *          The ib-by-n triangular factor T of the block reflector.
 *          T is upper triangular by block (economic storage);
 *          The rest of the array is not referenced.
 *
 * @param[in] ldt
 *          The leading dimension of the array T.  ldt >= ib.
 *
 * @param tau
 *          Auxiliary workspace array of length n.
 *
 * @param work
 *          Auxiliary workspace array.  Should be at least of length ib*n.
 *          (Reviewer note: a stale @param for a non-existent `lwork`
 *          argument was removed; the function takes no lwork parameter.)
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
int core_zgeqrt(int m, int n, int ib,
                plasma_complex64_t *A, int lda,
                plasma_complex64_t *T, int ldt,
                plasma_complex64_t *tau,
                plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -2;
    }
    if ((ib < 0) || ( (ib == 0) && (m > 0) && (n > 0) )) {
        coreblas_error("illegal value of ib");
        return -3;
    }
    if (A == NULL) {
        coreblas_error("NULL A");
        return -4;
    }
    if (lda < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda");
        return -5;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -6;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        coreblas_error("illegal value of ldt");
        return -7;
    }
    if (tau == NULL) {
        coreblas_error("NULL tau");
        return -8;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -9;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    // Blocked factorization: factor an m-i by sb panel, build its
    // triangular factor T, then apply the block reflector to the
    // trailing submatrix to the right of the panel.
    int k = imin(m, n);
    for (int i = 0; i < k; i += ib) {
        int sb = imin(ib, k-i);

        // Unblocked QR of the current panel A(i:m-1, i:i+sb-1).
        LAPACKE_zgeqr2_work(LAPACK_COL_MAJOR,
                            m-i, sb,
                            &A[lda*i+i], lda, &tau[i], work);

        // Form the triangular factor T of the panel's block reflector.
        LAPACKE_zlarft_work(LAPACK_COL_MAJOR,
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            m-i, sb,
                            &A[lda*i+i], lda, &tau[i],
                            &T[ldt*i], ldt);

        if (n > i+sb) {
            // Apply Q^H from the left to the trailing columns.
            LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                                lapack_const(PlasmaLeft),
                                lapack_const(Plasma_ConjTrans),
                                lapack_const(PlasmaForward),
                                lapack_const(PlasmaColumnwise),
                                m-i, n-i-sb, sb,
                                &A[lda*i+i], lda,
                                &T[ldt*i], ldt,
                                &A[lda*(i+sb)+i], lda,
                                work, n-i-sb);
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: runs core_zgeqrt as a task depending on A (inout)
// and T (out), reporting failure through the sequence/request pair.
void core_omp_zgeqrt(int m, int n, int ib,
                     plasma_complex64_t *A, int lda,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // NOTE(review): the T dependence range is ib*n, although T is declared
    // with leading dimension ldt (>= ib) — confirm ldt == ib at call sites.
    #pragma omp task depend(inout:A[0:lda*n]) \
                     depend(out:T[0:ib*n])
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            // tau occupies the first n entries of the per-thread workspace;
            // the ib*n entries after it serve as `work`.  The workspace is
            // therefore assumed to hold at least n + ib*n entries — TODO
            // confirm against the workspace allocation.
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = core_zgeqrt(m, n, ib,
                                   A, lda,
                                   T, ldt,
                                   tau,
                                   tau+n);

            if (info != PlasmaSuccess) {
                plasma_error("core_zgeqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
calculate_E_field_flat_all_in_one.h
REAL HLLE_solve(REAL F0B1_r, REAL F0B1_l, REAL U_r, REAL U_l) { // Eq. 3.15 of https://epubs.siam.org/doi/abs/10.1137/1025002?journalCode=siread // F_HLLE = (c_min F_R + c_max F_L - c_min c_max (U_R-U_L)) / (c_min + c_max) return 0.5*(F0B1_r+F0B1_l-(U_r-U_l)); // FIXME: Curved space implementation! } /* Calculate the electric flux on both faces in the input direction. The input count is an integer that is either 0 or 1. If it is 0, this implies that the components are input in order of a backwards permutation and the final results will need to be multiplied by -1.0. If it is 1, then the permutation is forwards. */ void calculate_E_field_flat_all_in_one(const paramstruct *params, const REAL *Vr0,const REAL *Vr1, const REAL *Vl0,const REAL *Vl1, const REAL *Br0,const REAL *Br1, const REAL *Bl0,const REAL *Bl1, const REAL *Brflux_dirn, const REAL *Blflux_dirn, REAL *A2_rhs,const REAL SIGN,const int flux_dirn) { // FIXME: include metric functions! // This function is written to be generic and compute the contribution for all three AD RHSs. // However, for convenience, the notation used in the function itself is for the contribution // to AD2, specifically the [F_HLL^x(B^y)]_z term, with reconstructions in the x direction. This // corresponds to flux_dirn=0 and count=1 (which corresponds to SIGN=+1.0). // Thus, Az(i,j,k) += 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)) are solved here. // The other terms are computed by cyclically permuting the indices when calling this function. #include "GiRaFFE_standalone_Ccodes/set_Cparameters.h" #pragma omp parallel for for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) { for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) { for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++) { // First, we set the index from which we will read memory. indexp1 is incremented by // one point in the direction of reconstruction. These correspond to the faces at at // i-1/2 and i+1/2, respectively. // Now, we read in memory. 
We need the x and y components of velocity and magnetic field on both // the left and right sides of the interface at *both* faces. // Here, the point (i0,i1,i2) corresponds to the point (i-1/2,j,k) const int index = IDX3S(i0,i1,i2); const double Valenciav_rU0 = Vr0[index]; const double Valenciav_rU1 = Vr1[index]; const double B_rU0 = Br0[index]; const double B_rU1 = Br1[index]; const double B_rflux_dirn = Brflux_dirn[index]; const double Valenciav_lU0 = Vl0[index]; const double Valenciav_lU1 = Vl1[index]; const double B_lU0 = Bl0[index]; const double B_lU1 = Bl1[index]; const double B_lflux_dirn = Blflux_dirn[index]; // ******************************* // REPEAT ABOVE, but at i+1, which corresponds to point (i+1/2,j,k) // Recall that the documentation here assumes flux_dirn==0, but the // algorithm is generalized so that any flux_dirn or velocity/magnetic // field component can be computed via permuting the inputs into this // function. const int indexp1 = IDX3S(i0+(flux_dirn==0),i1+(flux_dirn==1),i2+(flux_dirn==2)); const double Valenciav_rU0_p1 = Vr0[indexp1]; const double Valenciav_rU1_p1 = Vr1[indexp1]; const double B_rU0_p1 = Br0[indexp1]; const double B_rU1_p1 = Br1[indexp1]; const double B_rflux_dirn_p1 = Brflux_dirn[indexp1]; const double Valenciav_lU0_p1 = Vl0[indexp1]; const double Valenciav_lU1_p1 = Vl1[indexp1]; const double B_lU0_p1 = Bl0[indexp1]; const double B_lU1_p1 = Bl1[indexp1]; const double B_lflux_dirn_p1 = Blflux_dirn[indexp1]; // ******************************* // DEBUGGING: // if(flux_dirn==0 && SIGN>0 && i1==Nxx_plus_2NGHOSTS1/2 && i2==Nxx_plus_2NGHOSTS2/2) { // printf("index=%d & indexp1=%d\n",index,indexp1); // } // Since we are computing A_z, the relevant equation here is: // -E_z(x_i,y_j,z_k) = 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k) // -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) ) // We will construct the above sum one half at a time, first with SIGN=+1, which // corresponds to flux_dirn = 0, 
count=1, and // takes care of the terms: // [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k) // ( Note that we will repeat the above with flux_dirn = 1, count = 0, with SIGN=-1 // AND with the input components switched (x->y,y->x) so that we get the term // -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) // thus completing the above sum. ) // Here, [F_HLL^i(B^j)]_k = (v^i B^j - v^j B^i) in general. // Calculate the flux vector on each face for each component of the E-field: // The F(B) terms are as Eq. 6 in Giacomazzo: https://arxiv.org/pdf/1009.2468.pdf // [F^i(B^j)]_k = \sqrt{\gamma} (v^i B^j - v^j B^i) // Therefore since we want [F_HLL^x(B^y)]_z, // we will code (v^x B^y - v^y B^x) on both left and right faces. const REAL F0B1_r = (Valenciav_rU0*B_rU1 - Valenciav_rU1*B_rU0); const REAL F0B1_l = (Valenciav_lU0*B_lU1 - Valenciav_lU1*B_lU0); // ZACH SAYS: Make sure the below is documented! // Compute the state vector for this flux direction // We must also multiply by sign so that we use the positive for the forward permutation // and negative for the backwards permutation. For Az, that means that we add +By and -Bx, // exactly as is done in the original GiRaFFE's A_i_rhs_no_gauge_terms.C, in line with // Del Zanna, 2003 [https://arxiv.org/pdf/astro-ph/0210618.pdf], Eq. 
44 const REAL U_r = B_rflux_dirn; //B_rU0; const REAL U_l = B_lflux_dirn; // Basic HLLE solver: const REAL FHLL_0B1 = HLLE_solve(F0B1_r, F0B1_l, U_r, U_l); // ************************************ // ************************************ // REPEAT ABOVE, but at point i+1 // Calculate the flux vector on each face for each component of the E-field: const REAL F0B1_r_p1 = (Valenciav_rU0_p1*B_rU1_p1 - Valenciav_rU1_p1*B_rU0_p1); const REAL F0B1_l_p1 = (Valenciav_lU0_p1*B_lU1_p1 - Valenciav_lU1_p1*B_lU0_p1); // Compute the state vector for this flux direction const REAL U_r_p1 = B_rflux_dirn_p1; const REAL U_l_p1 = B_lflux_dirn_p1; //const REAL U_r_p1 = B_rU1_p1; //const REAL U_l_p1 = B_lU1_p1; // Basic HLLE solver, but at the next point: const REAL FHLL_0B1p1 = HLLE_solve(F0B1_r_p1, F0B1_l_p1, U_r_p1, U_l_p1); // ************************************ // ************************************ // With the Riemann problem solved, we add the contributions to the RHSs: // -E_z(x_i,y_j,z_k) &= 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k) // -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) ) // (Eq. 11 in https://arxiv.org/pdf/1009.2468.pdf) // This code, as written, solves the first two terms for flux_dirn=0. Calling this function for count=1 // flips x for y to solve the latter two, switching to SIGN=-1 as well. // Here, we finally add together the output of the HLLE solver at i-1/2 and i+1/2 // We also multiply by the SIGN dictated by the order of the input vectors and divide by 4. A2_rhs[index] += SIGN*0.25*(FHLL_0B1 + FHLL_0B1p1); // flux dirn = 0 ===================> i-1/2 i+1/2 // Eq 11 in Giacomazzo: // -FxBy(avg over i-1/2 and i+1/2) + FyBx(avg over j-1/2 and j+1/2) // Eq 6 in Giacomazzo: // FxBy = vxBy - vyBx // -> // FHLL_0B1 = vyBx - vxBy } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++) } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) }
matrix.c
#include "matrix.h" /* * matrix.c * * Copyright (c) 2014, Rafat Hussain * License : BSD 3-Clause * See COPYRIGHT for more details */ typedef struct { float* a; int b; } vipair; float macheps() { float macheps; macheps = 1.0; while ((macheps + 1.0) > 1.0) { macheps = macheps / 2.0f; } macheps = macheps * 2; return macheps; } float pmax(float a, float b) { if (a > b) { return a; } else { return b; } } float pmin(float a, float b) { if (a < b) { return a; } else { return b; } } int imax(int a, int b) { if (a > b) { return a; } else { return b; } } int imin(int a, int b) { if (a < b) { return a; } else { return b; } } float signx(float x) { float sgn; if (x >= 0.) { sgn = 1.0; } else { sgn = -1.0; } return sgn; } float l2norm(float *vec, int N) { float l2, sum; int i; sum = 0.; for (i = 0; i < N; ++i) { sum += vec[i] * vec[i]; } l2 = sqrtf(sum); return l2; } int compare (const void* ind1, const void* ind2) { if (*((vipair *)ind1)->a > *((vipair *)ind2)->a) return -1; else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a) return 1; else return 0; } void sort1d(float* v,int N, int* pos) { vipair* val = NULL; int i; if (N <= 0) return; val = malloc(sizeof(vipair) * N); for (i = 0; i < N; ++i) { val[i].a = &v[i]; val[i].b = i; } qsort(val, N, sizeof(vipair), compare); for (i = 0; i < N; ++i) pos[i] = val[i].b; free(val); } float array_max_abs(float *array,int N) { int i; float m = 0.0; for (i = 0; i < N;++i) { if (fabs(array[i]) > m ) { m = fabsf(array[i]); } } return m; } float array_max(float *array,int N) { int i; float m = array[0]; for (i = 1; i < N;++i) { if (array[i] > m ) { m = array[i]; } } return m; } float array_min(float *array,int N) { int i; float m = array[0]; for (i = 1; i < N;++i) { if (array[i] < m ) { m = array[i]; } } return m; } void dtranspose(float *sig, int rows, int cols,float *col) { int max,ud,i,k; if (rows >= cols) { max = cols; } else { max = rows; } ud = 0; for (i= -rows + 1; i < cols; i++) { if (i <= 0) { ud++; if (ud >= max) ud = max; for (k 
= 0; k < ud; k++) { col[k*rows+k-i] = sig[(k-i)*cols+k]; } } else { if (i - cols + rows > 0) { ud--; if (ud >= max) ud = max; } for (k = 0; k < ud; k++) { col[(k+i)*rows+k] = sig[k*cols+k+i]; } } } } void stranspose(float *sig, int rows, int cols,float *col) { int t,u; register int i,j; // #pragma omp parallel for private(i,j,t,u) for (i=0; i < rows; i++) { t = i * cols; u = 0; for (j=0; j < cols; j++) { col[u+i] = sig[j+t]; u+=rows; } } } void rtranspose(float *m, int rows, int cols,float *n, int r, int c) { register int i,j; int rm,cm; int rm1,cm1,rm2,cm2; int block; block = (int) BLOCKSIZE; if (rows <= block && cols <= block) { for (i = 0; i < rows; ++i) { for (j = 0; j < cols; ++j) { n[i+j*r] = m[j+i*c]; //cout << *(n+i+j*r) << " "; } } //cout << endl; } else if (cols >= rows) { rm = rows; cm1 = (int) ceil((float) cols/2.0); cm2 = cols - cm1; rtranspose(m,rm,cm1,n,r,c); rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c); } else if (rows > cols) { rm1 = (int) ceil((float) rows/2.0); rm2 = rows - rm1; cm = cols; rtranspose(m,rm1,cm,n,r,c); rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c); } } void ctranspose(float *sig, int rows, int cols,float *col) { int r,c; int block; block = (int) TBLOCK; r= rows; c = cols; if (rows >= block || cols >= block) { rtranspose(sig,rows,cols,col,r,c); } else { stranspose(sig,rows,cols,col); } } void mtranspose(float *sig, int rows, int cols,float *col) { int block; block = (int) TBLOCK; if (rows >= block && cols >= block) { ctranspose(sig,rows,cols,col); } else { stranspose(sig,rows,cols,col); } } void itranspose(float *A, int M, int N) { int i, j, p, iter; float *buf; float temp; if (M == N) { for (i = 0; i < N; ++i) { for (j = i + 1; j < N; ++j) { temp = A[i + j*N]; A[i + j*N] = A[j + i*N]; A[j + i*N] = temp; } } } else if (M > N) { p = M - N; buf = (float*)malloc(sizeof(float)* p * N); memcpy(buf, A + N * N, sizeof(*A)*p*N); for (i = 0; i < N; ++i) { for (j = i + 1; j < N; ++j) { temp = A[i + j*N]; A[i + j*N] = A[j + i*N]; A[j + i*N] = temp; } } for (i 
= N - 1; i >= 1; --i) { memmove(A + i*M, A + i*N, sizeof(*A)*M); } for (i = 0; i < N; ++i) { iter = N + i * M; for (j = 0; j < p; ++j) { A[iter + j] = buf[j*N + i]; } } free(buf); } else if (M < N) { p = N - M; buf = (float*)malloc(sizeof(float)* p * M); for (i = 0; i < M; ++i) { iter = M + i*N; for (j = 0; j < p; ++j) { buf[j*M + i] = A[iter + j]; } } for (i = 1; i < M; ++i) { memmove(A + i*M, A + i * N, sizeof(*A)*M); } for (i = 0; i < M; ++i) { for (j = i + 1; j < M; ++j) { temp = A[i + j*M]; A[i + j*M] = A[j + i*M]; A[j + i*M] = temp; } } memcpy(A + M*M, buf, sizeof(*A)*p*M); free(buf); } } void mdisplay(float *A, int row, int col) { int i,j; printf("\n MATRIX Order : %d X %d \n \n",row,col); for (i = 0; i < row; i++) { printf("R%d: ",i); for ( j = 0; j < col;j++) { printf("%f ",A[i*col + j]); } printf(":R%d \n",i); } } void madd(float* A, float* B, float* C,int rows,int cols) { int N,i; /* * C = A + B . All matrices have identical dimensions rows X cols */ N = rows * cols; #pragma omp parallel for for (i = 0; i < N; ++i) { C[i] = A[i] + B[i]; } } void msub(float* A, float* B, float* C,int rows,int cols) { int N,i; /* * C = A - B . All matrices have identical dimensions rows X cols */ N = rows * cols; #pragma omp parallel for for (i = 0; i < N; ++i) { C[i] = A[i] - B[i]; } } void scale(float *A, int rows, int cols, float alpha) { int N,i; /* * A = alpha * A * Matrix A is overwritten. 
*/ N = rows * cols; #pragma omp parallel for for (i = 0; i < N;++i) { A[i] = alpha * A[i]; } } void nmult(float* A, float* B, float* C,int ra,int ca, int cb) { register int i,j,k; int u,v,t,rb; /* * C = A * B , where A is a ra*ca matric while B is a rb*cb * with ca = rb * Matrix C is a ra*cb matrix */ rb = ca; #pragma omp parallel for private(i,j,k,v,u,t) for (i = 0; i < ra; ++i) { for (j = 0; j < cb; ++j) { v = i * rb; u = i *cb; t = j + u; C[t] = 0.; for (k = 0; k < rb;++k) { C[t] += A[k + v] * B[j + k * cb]; } } } } void tmult(float* A, float* B, float* C,int ra,int ca, int cb) { register int i,j,k; int u,v,t,rb; float *BT; BT = (float*) malloc(sizeof(float) * ca * cb); /* * C = A * B , where A is a ra*ca matric while B is a rb*cb * with ca = rb * Matrix C is a ra*cb matrix */ mtranspose(B,ca,cb,BT); rb = ca; #pragma omp parallel for private(i,j,k,v,u,t) for (i = 0; i < ra; ++i) { for (j = 0; j < cb; ++j) { v = i * rb; u = i *cb; t = j + u; C[t] = 0.; for (k = 0; k < rb;++k) { C[t] += A[k + v] * BT[k + j * rb]; } } } free(BT); } void recmult(float* A, float* B, float* C,int m,int n, int p,int sA,int sB, int sC) { int m2,n2,p2; register int i,j,k; int u,v,t; if (m + n + p <= CUTOFF) { //#pragma omp parallel for private(i,j,k,v,u,t) for (i = 0; i < m; ++i) { for (j = 0; j < p; ++j) { v = i * sB; u = i * sC; t = j + u; for (k = 0; k < n;++k) { C[t] += A[k + v] * B[j + k * sC]; } } } } else if (m >= n && m >= p) { m2 = (int) ceil((float) m / 2.0); recmult(A,B,C,m2,n,p,sA,sB,sC); recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC); } else if (n >= m && n >= p) { n2 = (int) ceil((float) n / 2.0); recmult(A,B,C,m,n2,p,sA,sB,sC); recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC); } else if (p >= m && p >= n) { p2 = (int) ceil((float) p / 2.0); recmult(A,B,C,m,n,p2,sA,sB,sC); recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC); } } void rmult(float* A, float* B, float* C,int m,int n, int p) { int strA,strB,strC; int N; register int i; strA = m; strB = n; strC = p; N = m * p; for(i = 0; i < N; 
++i) { C[i] = 0.; } recmult(A,B,C,m,n,p,strA,strB,strC); } int findrec(int *a, int *b, int *c) { int rec; float da,db,dc,mul; da = (float) *a; db = (float) *b; dc = (float) *c; rec = 0; mul = 1.; while (da + db + dc > (float) CUTOFF) { rec++; mul *= 2; da = ceilf(da/2.0f); db = ceilf(db/2.0f); dc = ceilf(dc/2.0f); } *a = (int) da * mul; *b = (int) db * mul; *c = (int) dc * mul; return rec; } void add_zero_pad(float *X, int rows, int cols, int zrow, int zcol,float *Y) { int r,c,i,j,u,v; r = rows + zrow; c = cols + zcol; for (i = 0; i < rows;++i) { u = i*c; v = i * cols; for (j = 0; j < cols;++j) { Y[u + j] = X[v + j]; } for (j = cols; j < c;++j) { Y[u + j] = 0.; } } for (i = rows; i < r;++i) { u = i*c; for(j = 0; j < c;++j) { Y[u + j] = 0.; } } } void remove_zero_pad(float *Y, int rows, int cols, int zrow, int zcol,float *Z) { int r,c,i,j,u,v; r = rows - zrow; c = cols - zcol; for (i = 0; i < r; ++i) { u = i * c; v = i * cols; for (j = 0; j < c; ++j) { Z[j + u] = Y[j + v]; } } } void madd_stride(float* A, float* B, float* C,int rows,int cols,int sA,int sB,int sC) { int i,j,u,v,w; for (i = 0; i < rows; ++i) { u = i * sC; v = i * sA; w = i * sB; for(j = 0; j < cols;j++) { C[j + u] = A[j + v] + B[j + w]; } } } void msub_stride(float* A, float* B, float* C,int rows,int cols,int sA,int sB,int sC) { int i,j,u,v,w; for (i = 0; i < rows; ++i) { u = i * sC; v = i * sA; w = i * sB; for(j = 0; j < cols;j++) { C[j + u] = A[j + v] - B[j + w]; } } } void rmadd_stride(float* A, float* B, float* C,int rows,int cols,int p,int sA,int sB,int sC) { int i,j,u,v,w; if (rows + cols + p <= CUTOFF) { for (i = 0; i < rows; ++i) { u = i * sC; v = i * sA; w = i * sB; for(j = 0; j < cols;j++) { C[j + u] = A[j + v] + B[j + w]; } } } else { rows/=2;cols/=2;p/=2; rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC); rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC); rmadd_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC); rmadd_stride(A + rows *sB + cols,B + rows *sC + cols,C + 
rows *sC + cols,rows,cols,p,sA,sB,sC); } } void rmsub_stride(float* A, float* B, float* C,int rows,int cols,int p,int sA,int sB,int sC) { int i,j,u,v,w; if (rows + cols + p <= CUTOFF) { for (i = 0; i < rows; ++i) { u = i * sC; v = i * sA; w = i * sB; for(j = 0; j < cols;j++) { C[j + u] = A[j + v] - B[j + w]; } } } else { rows/=2;cols/=2;p/=2; rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC); rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC); rmsub_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC); rmsub_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC); } } void srecmult(float* A, float* B, float* C,int m,int n, int p,int sA,int sB, int sC) { register int i,j,k; int u,v,t; float sum; float *A1,*B1; float *a11,*a12,*a21,*a22; float *b11,*b12,*b21,*b22; float *c11,*c12,*c21,*c22; float *m1,*m2,*m3,*m4,*m5,*m6,*m7; int sm1,sm2,sm3,sm4,sm5,sm6,sm7; int sA1,sB1; if (m + n + p <= CUTOFF) { for (i = 0; i < m; ++i) { for (j = 0; j < p; ++j) { v = i * sA; u = i * sC; t = j + u; sum = 0.; for (k = 0; k < n;++k) { sum += A[k + v] * B[j + k * sB]; } C[t] = sum; } } } else { m/=2;n/=2;p/=2; // A size mXn, C size mXp a11 = A; a12 = A + n; a21 = A + m * sA; a22 = A + n + m * sA; //B size nXp b11 = B; b12 = B + p; b21 = B + n * sB; b22 = B + p + n * sB; //C size mXp c11 = C; c12 = C + p; c21 = C + m * sC; c22 = C + p + m * sC; // m matrices have dimension m X p each. 
See http://en.wikipedia.org/wiki/Strassen_algorithm m1 = (float*) malloc(sizeof(float) *m * p); sm1 = p; m3 = (float*) malloc(sizeof(float) *m * p); sm3 = p; m4 = (float*) malloc(sizeof(float) *m * p); sm4 = p; m2 = c21; sm2 = sC; m5 = c12; sm5 = sC; m6 = c22; sm6 = sC; m7 = c11; sm7 = sC; //m1 sA1 = n; sB1 = p; A1 = (float*) malloc(sizeof(float) * m * n); B1 = (float*) malloc(sizeof(float) * n * p); madd_stride(a11,a22,A1,m,n,sA,sA,sA1); madd_stride(b11,b22,B1,n,p,sB,sB,sB1); srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1); free(A1); free(B1); //m2 A1 = (float*) malloc(sizeof(float) * m * n); madd_stride(a21,a22,A1,m,n,sA,sA,sA1); srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2); free(A1); //m3 B1 = (float*) malloc(sizeof(float) * n * p); //rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2); msub_stride(b12,b22,B1,n,p,sB,sB,sB1); srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3); free(B1); //m4 B1 = (float*) malloc(sizeof(float) * n * p); //rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2); msub_stride(b21,b11,B1,n,p,sB,sB,sB1); srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4); free(B1); //m5 A1 = (float*) malloc(sizeof(float) * m * n); madd_stride(a11,a12,A1,m,n,sA,sA,sA1); srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5); free(A1); //m6 A1 = (float*) malloc(sizeof(float) * m * n); B1 = (float*) malloc(sizeof(float) * n * p); msub_stride(a21,a11,A1,m,n,sA,sA,sA1); madd_stride(b11,b12,B1,n,p,sB,sB,sB1); srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6); free(A1); free(B1); //m7 A1 = (float*) malloc(sizeof(float) * m * n); B1 = (float*) malloc(sizeof(float) * n * p); msub_stride(a12,a22,A1,m,n,sA,sA,sA1); madd_stride(b21,b22,B1,n,p,sB,sB,sB1); srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7); free(A1); free(B1); // c11 A1 = (float*) malloc(sizeof(float) * m * p); sA1 = p; madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7); msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1); madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7); free(A1); // c22 A1 = (float*) malloc(sizeof(float) * m * p); sA1 = p; madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6); msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1); 
madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6); free(A1); //c12 madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5); //c21 madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2); free(m1); free(m3); free(m4); } } void smult(float* A, float* B, float* C,int m,int n, int p) { int a,b,c,nrec; float *X,*Y,*Z,*P; a = m; b = n; c = p; nrec = findrec(&a,&b,&c); X = (float*) malloc(sizeof(float) * a * b); Y = (float*) malloc(sizeof(float) * b * c); Z = (float*) malloc(sizeof(float) * a * c); P = (float*) malloc(sizeof(float) * (a/2) * (c/2)); add_zero_pad(A,m,n,a-m,b-n,X); add_zero_pad(B,n,p,b-n,c-p,Y); srecmult(X,Y,Z,a,b,c,b,c,c); // Memory allocation needs work remove_zero_pad(Z,a,c,a-m,c-p,C); // free X,Y,Z free(X); free(Y); free(Z); free(P); } void mmult(float* A, float* B, float* C,int m,int n, int p) { if (m+n+p <= CUTOFF/2) { nmult(A,B,C,m,n,p); } else { smult(A,B,C,m,n,p); } } static int pludecomp(float *A,int N,int *ipiv) { int k,j,l,c1,c2,mind,tempi; float ld,mult,mval,temp; for(k=0;k < N;++k) ipiv[k] = k; for(k = 0; k < N-1; ++k) { //c2 = k*N; mval = fabsf(A[k*N + k]); mind = k; for (j=k+1; j < N;++j) { if (mval < fabs(A[j*N + k])) { mval = A[j*N + k]; mind = j; } } if ( mind != k) { c1 = k *N; c2 = mind * N; tempi = ipiv[mind]; ipiv[mind] = ipiv[k]; ipiv[k] = tempi; for (j = 0; j < N;j++) { temp = A[c1 + j]; *(A + c1 + j) = *(A + c2 + j); *(A + c2 + j) = temp; } } c2 = k*N; ld = A[c2 + k]; if (ld != 0.) { for (j = k+1; j < N; ++j) { c1 = j*N; mult = A[c1+k] /= ld; //printf("\n k %d j %d mult %f \n",k,j,mult); for(l = k+1; l < N; ++l) { A[c1+l] -= mult * A[c2 + l]; } } } } return 0; } void ludecomp(float *A,int N,int *ipiv) { pludecomp(A,N,ipiv); } void linsolve(float *A,int N,float *b,int *ipiv,float *x) { int i,j,c1,l; float *y; float sum; y = (float*) malloc(sizeof(float) *N); /* * Two step Solution L * U * x = b * Let U*x = y * Solve L * y = b for y (Forward Substitution * Solve U * x = b for x (Back Substitution) */ for(i = 0; i < N;++i) { y[i] = 0.; x[i] = 0.; if ( A[i*N + i] == 0.) 
{ printf("The Matrix system does not have a unique solution"); exit(1); } //printf("\n B %d",ipiv[i]); } // Forward Substitution y[0] = b[ipiv[0]]; for(i = 1; i < N; ++i) { sum = 0.; c1 = i*N; for(j = 0; j < i; ++j) { sum += y[j] * A[c1 + j]; } y[i] = b[ipiv[i]] - sum; } // Back Substitution x[N - 1] = y[N - 1]/A[N * N - 1]; for (i = N - 2; i >= 0; i--) { sum = 0.; c1 = i*(N+1); l=0; for(j = i+1; j < N;j++) { l++; sum += A[c1 + l] * x[j]; } x[i] = (y[i] - sum) / A[c1]; } free(y); } void minverse(float *A,int N,int *ipiv,float *inv) { int i,j,stride; float *col,*x; col = (float*) malloc(sizeof(float) * N); x = (float*) malloc(sizeof(float) * N); for (i = 0; i < N; ++i) { col[i] = 0.; x[i] = 0.; } for (i = 0; i < N; ++i) { col[i] = 1.; linsolve(A,N,col,ipiv,x); stride = i; for(j = 0; j < N;++j) { inv[stride] = x[j]; stride+= N; } col[i] = 0.; } free(x); free(col); } void eye(float *mat,int N) { int i,j,t; for(i = 0;i < N;++i) { for(j =0; j < N;++j) { t = i*N; if (i == j) { mat[t+j] = 1.; } else { mat[t+j] = 0.; } } } } static float house_1(float*x,int N,float *v) { float beta,mu,temp; float *sigma; int i; sigma = (float*) malloc(sizeof(float) * 1); if (N > 1) { mmult(x+1,x+1,sigma,1,N-1,1); } else { sigma[0] = 0.0; } v[0] =1.; for (i = 1; i < N;++i) { v[i] = x[i]; } if (sigma[0] == 0. && x[0] >= 0.) { beta = 0.; } else if (sigma[0] == 0. && x[0] < 0.) { beta = -2.; }else { mu = sqrtf(sigma[0] + x[0] * x[0]); if (x[0] <= 0.) { v[0] = x[0] - mu; } else { v[0] = - sigma[0] / (x[0] + mu); } temp = v[0]; beta = (2.0f * v[0] * v[0]) /(sigma[0] + v[0] * v[0]); for (i = 0; i < N;++i) { v[i] /= temp; } } free(sigma); return beta; } float house_2(float*x,int N,float *v) { float sgn,beta,sc; float *sigma,*e; int i; sigma = (float*) malloc(sizeof(float) * 1); e = (float*) malloc(sizeof(float) * N); beta = 2.0; sgn = 1.0; mmult(x,x,sigma,1,N,1); sigma[0] = sqrtf(sigma[0]); e[0] =1.; for (i = 1; i < N;++i) { e[i] = 0.; } if (x[0] > 0.) { sgn = 1.0; } else if (x[0] < 0.) 
{ sgn = -1.0; } else if (x[0] == 0.) { sgn = 0.; } sc = sigma[0] * sgn; //scale(e,N,1,sc); e[0] *= sc; for(i = 0; i < N;++i) { v[i] = e[i] + x[i]; } mmult(v,v,sigma,1,N,1); sigma[0] = sqrtf(sigma[0]); for(i = 0; i < N;++i) { v[i] = v[i] / sigma[0]; } free(sigma); free(e); return beta; } float house(float*x,int N,float *v) { float beta; beta = house_1(x,N,v); return beta; } void housemat(float *v, int N,float beta,float *mat) { float *temp; temp = (float*) malloc(sizeof(float) * N * N); eye(mat,N); mmult(v,v,temp,N,1,N); scale(temp,N,N,beta); msub(mat,temp,mat,N,N); free(temp); } void qrdecomp(float *A, int M, int N,float *bvec) { int j,i,k,u,t; float *x,*v,*AT,*w; float beta; if (M < N) { printf("M should be greater than or equal to N"); exit(1); } x = (float*) malloc(sizeof(float) * M); v = (float*) malloc(sizeof(float) * M); AT = (float*) malloc(sizeof(float) * M * N); w = (float*) malloc(sizeof(float) * M * M); for(j = 0; j < N;++j) { for(i=j;i < M;++i) { x[i-j] = A[i*N+j]; } beta = house(x,M-j,v); bvec[j] = beta; for (i=j; i < M; i++) { t = i * N; u = 0; for (k=j; k < N; k++) { AT[u+i-j] = A[k+t]; u+=(M-j); } } mmult(AT,v,w,N-j,M-j,1); scale(w,N-j,1,beta); mmult(v,w,AT,M-j,1,N-j); for (i=j; i < M; i++) { t = i *N; for (k=j; k < N; k++) { A[t+k] -= AT[(i-j)*(N-j) + k - j]; } } if (j < M) { for(i=j+1;i < M;++i) { A[i*N+j] = v[i-j]; } } } free(x); free(v); free(AT); free(w); } void getQR(float *A,int M,int N,float *bvec,float *Q, float *R) { int i,j,k,t,u; float *x,*v,*AT,*w; x = (float*) malloc(sizeof(float) * M); v = (float*) malloc(sizeof(float) * M); AT = (float*) malloc(sizeof(float) * M * N); w = (float*) malloc(sizeof(float) * M * M); for(i = 0; i < N;++i) { t = i *N; for(j = 0; j < N;++j) { if (i > j) { R[t+j] = 0.; } else { R[t+j] = A[t+j]; } } } for(i = 0; i < M;++i) { t = i *N; for(j = 0; j < N;++j) { if (i == j) { Q[t+j] = 1.; } else { Q[t+j] = 0.; } } } for(j = N-1; j >= 0;--j) { v[0] = 1.; for(i=j+1;i < M;++i) { v[i-j] = A[i*N+j]; } for (i=j; i < M; 
i++) { t = i * N; u = 0; for (k=j; k < N; k++) { AT[u+i-j] = Q[k+t]; u+=(M-j); } } mmult(AT,v,w,N-j,M-j,1); scale(w,N-j,1,bvec[j]); mmult(v,w,AT,M-j,1,N-j); for (i=j; i < M; i++) { t = i *N; for (k=j; k < N; k++) { Q[t+k] -= AT[(i-j)*(N-j) + k - j]; } } } free(x); free(v); free(AT); free(w); } void hessenberg(float *A,int N) { int k,i,j,t,u; float *x,*v,*AT,*w; float beta; x = (float*) malloc(sizeof(float) * N); v = (float*) malloc(sizeof(float) * N); AT = (float*) malloc(sizeof(float) * N * N); w = (float*) malloc(sizeof(float) * N); for (k = 0; k < N-2;++k) { for(i=k + 1;i < N;++i) { x[i-k-1] = A[i*N+k]; //printf("x %f \n",x[i-k-1]); } beta = house(x,N-k-1,v); for (i=k+1; i < N; i++) { t = i * N; u = 0; for (j=k; j < N; j++) { AT[u+i-k-1] = A[j+t]; u+=(N-k-1); } } //mdisplay(AT,N-k,N-k-1); mmult(AT,v,w,N-k,N-k-1,1); scale(w,N-k,1,beta); mmult(v,w,AT,N-k-1,1,N-k); //mdisplay(AT,N-k-1,N-k); for (i=k+1; i < N; i++) { t = i * N; for (j=k; j < N; j++) { A[t+j] -= AT[(i-k-1)*(N-k) + j - k]; } } //mdisplay(A,N,N); for (i=0; i < N; i++) { t = i * N; u = i * (N-k-1); for (j=k+1; j < N; j++) { AT[u+j-k-1] = A[t+j]; } } //mdisplay(AT,N,N-k-1); mmult(AT,v,w,N,N-k-1,1); scale(w,N,1,beta); mmult(w,v,AT,N,1,N-k-1); //mdisplay(AT,N,N-k-1); for (i=0; i < N; i++) { t = i * N; u = i * (N-k-1); for (j=k+1; j < N; j++) { A[t+j] -= AT[u+j-k-1]; } } } free(x); free(v); free(AT); free(w); } void francisQR(float *A,int N) { int m,n,k,q,r,t,u,i,j; float s,t2,beta; float *x,*v,*AT,*w; int NN; /* * Reference - Algorithm 7.5.1 Golub,van Loan Matrix Computations 3rd Edition */ x = (float*) malloc(sizeof(float) * 3); v = (float*) malloc(sizeof(float) * 3); AT = (float*) malloc(sizeof(float) * 3 * N); w = (float*) malloc(sizeof(float) * N); n = N-1; m = n-1; NN = N*N; s = A[NN-1] + A[NN-N-2]; t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1]; x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2; x[1] = A[N]*(A[0] + A[N+1] - s); x[2] = A[N] * A[N+N+1]; if (N <= 2) { return; } for (k = -1; k < N - 3;++k) { 
beta = house(x,3,v); //mdisplay(x,3,1); if (k > 0) { q = k; } else { q = 0; } //printf("q %d \n",q); for (i=k+1; i < k+4; i++) { t = i * N; u = 0; for (j=q; j < N; j++) { AT[u+i-k-1] = A[j+t]; u+=3; } } mmult(AT,v,w,N-q,3,1); scale(w,N-q,1,beta); mmult(v,w,AT,3,1,N-q); for (i=k+1; i < k+4; i++) { t = i * N; for (j=q; j < N; j++) { A[t+j] -= AT[(i-k-1)*(N-q) + j - q]; } } //mdisplay(A,N,N); if (k+4 >= n) { r = N; } else { r = k+4+1; } //printf("r %d \n",r); for (i=0; i < r; i++) { t = i * N; u = i * 3; for (j=k+1; j < k+4; j++) { AT[u+j-k-1] = A[t+j]; } } mmult(AT,v,w,r,3,1); scale(w,r,1,beta); mmult(w,v,AT,r,1,3); //mdisplay(AT,N,N-k-1); for (i=0; i < r; i++) { t = i * N; u = i * 3; for (j=k+1; j < k+4; j++) { A[t+j] -= AT[u+j-k-1]; } } //mdisplay(A,N,N); x[0] = A[N*(k+2) + k+1]; x[1] = A[N*(k+3) + k+1]; if (k < n-3) { x[2] = A[N*(k+4) + k+1]; } //mdisplay(x,3,1); } //mdisplay(x,2,1); beta = house(x,2,v); for (i=n-1; i < N; i++) { t = i * N; u = 0; for (j=n-2; j < N; j++) { AT[u+i-n+1] = A[j+t]; u+=2; } } mmult(AT,v,w,3,2,1); scale(w,3,1,beta); mmult(v,w,AT,2,1,3); for (i=n-1; i < N; i++) { t = i * N; for (j=n-2; j < N; j++) { A[t+j] -= AT[(i-n+1)*3 + j - n + 2]; } } for (i=0; i < N; i++) { t = i * N; u = i * 2; for (j=n-1; j < N; j++) { AT[u+j-n+1] = A[t+j]; } } mmult(AT,v,w,N,2,1); scale(w,N,1,beta); mmult(w,v,AT,N,1,2); //mdisplay(AT,N,N-k-1); for (i=0; i < N; i++) { t = i * N; u = i * 2; for (j=n-1; j < N; j++) { A[t+j] -= AT[u+j-n+1]; } } free(x); free(v); free(AT); free(w); } void eig22(float *A, int stride,float *eigre,float *eigim) { int N; float a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22; N = stride; a11 = A[0]; a12 = A[1]; a21 = A[N]; a22 = A[N+1]; if ( (a12 + a21) == 0) { c = 1.0f/sqrtf(2.0f); s = c; } else { t1 = (a11 - a22) / (a12 + a21); t = t1 /(1.0f + sqrtf(1+t1*t1)); c = 1.0f/sqrtf(1 + t*t); s = c*t; } c2 = c*c; s2 = s*s; cs = c*s; at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21); at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22); at21 = c2 
* a21 - s2 * a12 + cs * (a11 - a22); at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21); eigre[0] = eigre[1] = at11; eigim[0] = sqrtf(-at12 * at21); eigim[1] = -sqrtf(-at12 * at21); if ( at12*at21 >= 0) { if (at12 == 0) { c = 0; s = 1; c2 = 0; s2 = 1; cs = 0; } else { t = sqrtf(at21/at12); t2 = t * t; cs = t/(1+t2); c2 = (1+t2); s2 = t2 /(1+t2); } eigim[0] = eigim[1] = 0.0; eigre[0] = at11 - cs * (at12 + at21); eigre[1] = at11 + cs * (at12 + at21); } } int francis_iter(float *A, int N, float *H) { int success,brkpoint; int i,j,it,p,q,t,u; float *temp; success = 0; brkpoint = 30 * N; it = 0; p = N - 1; temp = (float*) malloc(sizeof(float) * N * N); for(i = 0; i < N*N;++i) { H[i] = A[i]; } hessenberg(H,N); while (p > 1 && it < brkpoint) { while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) { if (H[N*p + p-1] == 0) { p--; } else if (H[N*(p-1) + p-2] == 0) { p=p-2; } } if (p > 0) { q = p-1; while (q > 0 && fabs(H[N*q + q-1]) != 0) { q--; } //printf("%d %d \n",q,p); for (i=q; i <= p; i++) { t = i * N; u = (i-q) * (p-q+1); for (j=q; j <= p; j++) { temp[u+j-q] = H[t+j]; } } francisQR(temp,p-q+1); for (i=q; i <= p; i++) { t = i * N; u = (i-q) * (p-q+1); for (j=q; j <= p; j++) { H[t+j] = temp[u+j-q]; } } //mdisplay(H,N,N); for(i = q; i <= p-1;++i) { if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) { H[(i+1)*N+i] = 0.; } } it++; //printf("iter %d \n",it); } } if (it == brkpoint) { success = 0; } else { success = 1; } free(temp); return success; } static void eig2t(float *A, int stride) { int N; float a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22; N = stride; a11 = A[0]; a12 = A[1]; a21 = A[N]; a22 = A[N+1]; if ( (a12 + a21) == 0) { c = 1.0f/sqrtf(2.0f); s = c; } else { t1 = (a11 - a22) / (a12 + a21); t = t1 /(1.0f + sqrtf(1+t1*t1)); c = 1.0f/sqrtf(1 + t*t); s = c*t; } c2 = c*c; s2 = s*s; cs = c*s; at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21); at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22); at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22); 
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21); A[0] = at11; A[1] = at12; A[N] = at21; A[N+1] = at22; } void eig(float *A,int N,float *eigre,float *eigim) { int i,t,u,n; float *H; float t1,t2,cs; H = (float*) malloc(sizeof(float) * N * N); n = N - 1; francis_iter(A,N,H); //mdisplay(H,N,N); i = 0; while (i < n) { u = i * N; t = (i+1)*N; if (H[t+i] != 0.) { eig2t(H+u+i,N); i = i +2; } else { i++; } } //mdisplay(H,N,N); i = 0; while (i < n) { u = i * N; t = (i+1)*N; if (H[t+i] != 0.) { if (H[u+i+1] * H[t+i] < 0.) { eigre[i] = H[u+i]; eigre[i+1] = H[t+i+1]; eigim[i] = sqrtf(-H[u+i+1] * H[t+i]); eigim[i+1] = -sqrtf(-H[u+i+1] * H[t+i]); } else { if (H[u+i+1] == 0.) { cs = 0.; } else { t1 = sqrtf(H[t+i]/H[u+i+1]); t2 = t1 * t1; cs = t1/(1+t2); } eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]); eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]); eigim[i] = 0.; eigim[i+1] = 0.; } i= i + 2; } else { eigre[i] = H[u+i]; eigim[i] = 0.; i++; } } if (i == n) { eigre[i] = H[N*N - 1]; eigim[i] = 0.; } free(H); } static int rcholu(float *A,int N, int stride, float *U22) { int sc; int j,i,u,w; float u11; if (N == 1) { if (A[0] > 0) { A[0] = sqrtf(A[0]); return 0; } else { return -1; } } else { if (A[0] < 0) { return -1; } u11 = sqrtf(A[0]); A[0] = u11; for (j = 1; j < N;++j) { A[j] /= u11; } mmult(A+1,A+1,U22,N-1,1,N-1); for (i = 0; i < N-1; ++i) { u = stride + 1+ i * stride; w = i * (N-1); for(j = i; j < N-1;j++) { A[j + u] -= U22[j + w]; } } sc = rcholu(A+stride+1,N-1,stride,U22); if (sc == -1) { return -1; } } return sc; } static int rbcholu(float *A,int N, int stride, float *UB, float *UT) { int bs,bb,i,j,Nb,t,k,u,v,w,sc; float *b,*x,*U12,*U12T; float sum; bs = (int) BLOCKSIZE; bb = bs*bs; if (N <= BLOCKSIZE) { sc = rcholu(A,N,stride,UB); if (sc == -1) { return -1; } } else { Nb = N - bs; x = (float*) malloc(sizeof(float) * bs); b = (float*) malloc(sizeof(float) * bs); U12T = (float*) malloc(sizeof(float) * Nb * bs); U12 = (float*) malloc(sizeof(float) * Nb * bs); rcholu(A,bs,stride,UB); 
// U11 for (i =0; i < bs;++i) { t = i *stride; u = 0; for(j = 0; j < N;++j) { UT[u+i] = A[j+t]; u += bs; } } for(k = 0; k < Nb;++k) { u = k * bs; for(i = 0; i < bs;++i) { b[i] = UT[bb+u+i]; x[i] = 0.; } for (i = 0; i < bs;++i) { t = i*bs; sum = 0; for (j = 0; j < i;++j) { sum += UT[t+j] * x[j]; } x[i] = (b[i] - sum) / UT[t+i]; } v = bs + k; for(i = 0; i < bs;++i) { A[v] = x[i]; U12T[u+i] = x[i]; v += stride; } } mtranspose(U12T,Nb,bs,U12); mmult(U12T,U12,UT,Nb,bs,Nb); free(U12T); free(U12); free(b); free(x); for (i = 0; i < Nb; ++i) { u = bs * stride + bs + i * stride; w = i * Nb; for(j = i; j < Nb;j++) { A[j + u] -= UT[j + w]; } } sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT); if (sc == -1) { return -1; } } return sc; } int cholu(float *A, int N) { int stride,i,j,t,sc; float *U22; U22 = (float*) malloc(sizeof(float) * N * N); stride = N; sc = rcholu(A,N,stride,U22); for(i=0; i < N;++i) { t = i *N; for(j=0;j < i;++j) { A[t+j] = 0.; } } free(U22); return sc; } int bcholu(float *A, int N) { int stride,i,j,t,b,sc; float *UB,*UT; b = (int) BLOCKSIZE; UT = (float*) malloc(sizeof(float) * N * N); UB = (float*) malloc(sizeof(float) * b * b); stride = N; sc = rbcholu(A,N,stride,UB,UT); for(i=0; i < N;++i) { t = i *N; for(j=0;j < i;++j) { A[t+j] = 0.; } } free(UB); free(UT); return sc; } int chol(float *A, int N) { int sc; if ( N <= (int) BLOCKSIZE) { sc = cholu(A,N); } else { sc = bcholu(A,N); } return sc; } static void rchold(float *A,int N, int stride, float *U22) { int j,i,u,w; float d1; if (N == 1) { return; } else { d1 = A[0]; for (j = 1; j < N;++j) { A[j] /= d1; } mmult(A+1,A+1,U22,N-1,1,N-1); scale(U22,N-1,N-1,d1); for (i = 0; i < N-1; ++i) { u = stride + 1+ i * stride; w = i * (N-1); for(j = i; j < N-1;j++) { A[j + u] -= U22[j + w]; } } rchold(A+stride+1,N-1,stride,U22); } } void chold(float *A, int N) { int stride,i,j,t; float *U22; U22 = (float*) malloc(sizeof(float) * N * N); stride = N; rchold(A,N,stride,U22); for(i=0; i < N;++i) { t = i *N; for(j=0;j < 
i;++j) { A[t+j] = 0.; } } free(U22); } void svd_sort(float *U,int M,int N,float *V,float *q) { /* * Pavel Sakov's CSA SVD sort routine is used with some minor * modifications. See The License below */ /* * Copyright (C) 2000-2008 Pavel Sakov and CSIRO Redistribution and use of material from the package `csa', with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of material must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The names of the authors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ int i,j; float *UT,*VT,*qq; int *pos; UT = (float*) malloc(sizeof(float) * N * M); VT = (float*) malloc(sizeof(float) * N * N); qq = (float*) malloc(sizeof(float) * N); pos = (int*) malloc(sizeof(int) * N); for(i = 0;i < N;++i) { qq[i] = q[i]; } for(i = 0;i < M*N;++i) { UT[i] = U[i]; } for(i = 0;i < N*N;++i) { VT[i] = V[i]; } //mtranspose(U,M,N,UT); //mtranspose(V,N,N,VT); sort1d(q,N,pos); for(i = 0; i < N;++i) { q[i] = qq[pos[i]]; for (j = 0; j < M;++j) { U[j*N+i] = UT[j*N+pos[i]]; } for (j = 0; j < N;++j) { V[j*N+i] = VT[j*N+pos[i]]; } } free(UT); free(VT); free(qq); free(pos); } int svd(float *A,int M,int N,float *U,float *V,float *q) { int i,j,k,l,t,t2,ierr,cancel,iter,l1; float eps,g,x,s,temp,f,h,c,y,z,scale; float *e; /* THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD, NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH. HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971). */ /* * U = MXN * V - NXN * Q - NX1 */ /* * The program return error codes * * Code 0 if the computation is successful * Code -1 If M < N . Transpose the matrix such that rows > columns and trye again * Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value * in matrix.h header file. Default Value is 50 * */ if (M < N) { printf("Rows (M) should be greater than Columns (B) \n"); printf("Retry By Transposing the Input Matrix"); return -1; } e = (float*) malloc(sizeof(float) * N); ierr = 0; eps = macheps(); g = scale = x = 0.0; for(i = 0; i < M*N;++i) { U[i] = A[i]; } for(i = 0; i < N;++i) { l = i+1; e[i] = scale * g; g = 0.0; s = 0.0; scale = 0.0; if (i < M) { for(k = i; k < M;++k) { scale += fabsf(U[k*N+i]); } if (scale != 0.0) { for(k = i; k < M;++k) { t = k * N; U[t+i] /= scale; temp = U[t+i]; s += temp*temp; } f = U[i*N+i]; g = (f < 0) ? 
sqrtf(s) : -sqrtf(s); h = f * g - s; U[i*N+i] = f - g; if (i < N - 1) { for(j = l; j < N;++j) { s = 0.0; for(k = i; k < M;++k) { t = k * N; s += U[t+i]*U[t+j]; } f = s / h; for(k = i; k < M;++k) { t = k * N; U[t+j] += f * U[t+i]; } } } for(k = i; k < M;++k) { t = k * N; U[t+i] *= scale; } } } q[i] = scale * g; g = 0.0; s = 0.0; scale = 0.0; if (i < M && i != N - 1) { t = i *N; for(k = l; k < M;++k) { scale += fabsf(U[t+k]); } if (scale != 0.0) { for(k = l; k < N;++k) { U[t+k] /= scale; temp = U[t+k]; s = s + temp*temp; } f = U[t+l]; g = (f < 0) ? sqrtf(s) : -sqrtf(s); h = f * g - s; U[t+l] = f - g; for(k = l;k < N;++k) { e[k] = U[t+k] / h; } for (j = l; j < M; j++) { s = 0.0; t2 = j * N; for (k = l; k < N; k++) { s += U[t2+k] * U[t+k]; } for (k = l; k < N; k++) { U[t2+k] += s * e[k]; } } for (k = l; k < N; k++) U[t+k] *= scale; } } temp = fabsf(q[i]) + fabsf(e[i]); if (x < temp) { x = temp; } } /* ierr = 0; eps = macheps(); tol = eps; g = x = 0.0; for(i = 0; i < M*N;++i) { U[i] = A[i]; } for(i = 0; i < N;++i) { l = i+1; e[i] = g; s = 0.0; for(k = i; k < M;++k) { t = k * N; temp = U[t+i]; s += temp*temp; } if (s < tol) { g = 0.0; } else { f = U[i*N+i]; g = (f < 0) ? sqrtf(s) : -sqrtf(s); h = f * g - s; U[i*N+i] = f - g; for(j = l; j < N;++j) { s = 0.0; for(k = i; k < M;++k) { t = k * N; s += (U[t+i]*U[t+j]); } f = s / h; for(k = i; k < M;++k) { t = k * N; U[t+j] += (f * U[t+i]); } } } q[i] = g; s = 0.0; t = i * N; for(k = l; k < N;++k) { temp = U[t+k]; s = s + temp*temp; } if (s < tol) { g = 0.0; } else { f = U[t+l]; g = (f < 0) ? 
sqrtf(s) : -sqrtf(s); h = f * g - s; U[t+l] = f - g; for(k = l;k < N;++k) { e[k] = U[t+k] / h; } for (j = l; j < M; j++) { s = 0.0; t2 = j * N; for (k = l; k < N; k++) { s += U[t2+k] * U[t+k]; } for (k = l; k < N; k++) { U[t2+k] += s * e[k]; } } } temp = fabs(q[i]) + fabs(e[i]); if (x < temp) { x = temp; } } */ //Accumulating Right Hand Transformations for(i = N - 1;i >= 0;--i) { t = i * N; if (i < N - 1) { if (g != 0.0) { h = U[t+i+1] * g; for(j = l;j < N;++j) { V[j*N+i] = U[t+j] / h; } for(j = l;j < N;++j) { s = 0.0; for(k = l; k < N;++k) { s += U[t+k] * V[k*N+j]; } for(k = l; k < N;++k) { V[k*N+j] += (s * V[k*N+i]); } } } for(j = l; j < N;++j) { V[t+j] = V[j*N+i] = 0.0; } } V[t+i] = 1.0; g = e[i]; l = i; } //Accumulating Left Hand Transformations for(i = N - 1;i >= 0;--i) { t = i * N; l = i+1; g = q[i]; if (i < N - 1) { for(j = l;j < N;++j) { U[t+j] = 0.0; } } if (g != 0.0) { if (i != N - 1) { //h = U[t+i] * g; for(j = l;j < N;++j) { s = 0.0; for(k = l; k < M;++k) { s += (U[k*N+i] * U[k*N+j]); } f = (s / U[t+i]) / g; for(k = i; k < M;++k) { U[k*N+j] += (f * U[k*N+i]); } } } for(j = i; j < M;++j) { U[j*N+i] = U[j*N+i] / g; } } else { for(j = i; j < M;++j) { U[j*N+i] = 0.0; } } U[t+i] += 1.0; } // mdisplay(U,M,N); eps = eps * x; for(k = N - 1; k >= 0; --k) { iter = 0; while(1) { iter++; if (iter > SVDMAXITER) { printf("Convergence Not Achieved \n"); return 15; } cancel = 1; for(l = k; l >= 0; --l) { if (fabs(e[l]) <= eps) { cancel = 0; //test f convergence break; } if (fabs(q[l-1]) <= eps) { //Cancel break; } } if (cancel) { c = 0.0; s = 1.0; l1 = l - 1; for(i = l; i <= k;++i) { f = s*e[i]; e[i] *= c; if (fabs(f) <= eps) { break; } g = q[i]; h = q[i] = hypotf(f,g); c = g/h; s = -f/h; for(j = 0; j < M;++j) { t = j * N; y = U[t+l1]; z = U[t+i]; U[t+l1] = y * c + z * s; U[t+i] = z * c - y * s; } } } z = q[k]; if (l != k) { x = q[l]; y = q[k-1]; g = e[k-1]; h = e[k]; f = 0.5f * (((g + z) / h) * ((g - z) / y) + y / h - h / y); g = hypotf(f,1.0); if (f < 0.0) { temp = f 
- g; } else { temp = f+g; } f = x - (z / x) * z + (h / x) * (y / temp - h); //Next QR Transformation c = s = 1.0; for(i = l+1; i <= k;++i) { g = e[i]; y = q[i]; h = s * g; g = c * g; e[i-1] = z = hypotf(f,h); c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y *= c; for(j = 0; j < N;++j) { t = j * N; x = V[t+i-1]; z = V[t+i]; V[t+i-1] = x * c + z * s; V[t+i] = z * c - x * s; } q[i-1] = z = hypotf(f,h); if (z != 0.0) { c = f / z; s = h / z; } f = c * g + s * y; x = c * y - s * g; for(j = 0; j < M;++j) { t = j * N; y = U[t+i-1]; z = U[t+i]; U[t+i-1] = y * c + z * s; U[t+i] = z * c - y * s; } } e[l] = 0.0; e[k] = f; q[k] = x; } else { //convergence if (z < 0.0) { q[k] = -z; for (j = 0; j < N; j++) { t = j *N; V[t+k] = -V[t+k]; } } break; } } } svd_sort(U,M,N,V,q); free(e); return ierr; } int svd_transpose(float *A, int M, int N, float *U, float *V, float *q) { int ret; /* Call this routine if M < N * U = MXM * V - NXM * Q - MX1 */ if (M >= N) { printf("M>=N. Use svd routine.\n"); exit(-1); } mtranspose(A, M, N, V); ret = svd(V, N, M, V, U, q); return ret; } static int rank_c(float *A, int M,int N) { int i,rnk,ret; float eps,tol,szmax,qmax; float *U,*V,*q; U = (float*) malloc(sizeof(float) * M*N); V = (float*) malloc(sizeof(float) * N*N); q = (float*) malloc(sizeof(float) * N); eps = macheps(); rnk = 0; if (M < N) { //mtranspose(A,M,N,U); szmax = (float) N; } else { szmax = (float) M; } ret = svd(A,M,N,U,V,q); qmax = q[0]; if ( ret != 0) { printf("Failed to Compute SVD"); free(U); free(V); free(q); return -1; } tol = qmax*szmax *eps; for(i = 0; i < N;++i) { if (q[i] > tol) { rnk++; } } free(U); free(V); free(q); return rnk; } int rank(float *A, int M,int N) { int rnk; float *AT; AT = (float*) malloc(sizeof(float) * M*N); if (M < N) { mtranspose(A,M,N,AT); rnk = rank_c(AT,N,M); } else { rnk = rank_c(A,M,N); } free(AT); return rnk; } int lls_svd_multi(float *A, float *b, int M,int N, float *x) { int rnk, ret, i; float *U, *V, *q, *UT, *d; float eps, 
tol, szmax, qmax; if (M < N) { printf("Rows (M) should be greater than Columns (B) \n");\ return -1; } U = (float*)malloc(sizeof(float)* M*N); V = (float*)malloc(sizeof(float)* N*N); q = (float*)malloc(sizeof(float)* N); UT = (float*)malloc(sizeof(float)* M*N); d = (float*)malloc(sizeof(float)* N); /* The code returns -1 if SVD computation fails else it returns the rank of the matrix A (and the real size of vector x) */ ret = svd(A, M, N, U, V, q); if (ret != 0) { printf("Failed to Compute SVD"); free(U); free(V); free(q); free(UT); free(d); return -1; } szmax = (float)M; eps = macheps(); rnk = 0; qmax = q[0]; tol = qmax*szmax *eps; for (i = 0; i < N; ++i) { if (q[i] > tol) { rnk++; } } mtranspose(U, M, N, UT); d = (float*)malloc(sizeof(float)* N); mmult(UT, b, d, N, M, 1); for (i = 0; i < rnk; ++i) { d[i] /= q[i]; } for (i = rnk; i < N; ++i) { d[i] = 0.0; } mmult(V, d, x, N, N, 1); free(U); free(V); free(q); free(UT); free(d); return(rnk); }
multibit_fmt_plug.c
/*
 * JtR format to crack password protected MultiBit Wallets.
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
 * is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * All credit goes to Christopher Gurnee for making this work possible.
 *
 * Accepted input lines (see valid() for the exact field-length checks):
 *   $multibit$1*<8-byte salt hex>*<32-byte encrypted block hex>
 *       MultiBit Classic wallets (MD5-based key derivation + AES-256-CBC)
 *   $multibit$2*<16-byte iv hex>*<16-byte block hex>*<16-byte block2 hex>
 *       MultiBit HD wallets (scrypt key derivation + AES-256-CBC)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_multibit;
#elif FMT_REGISTERS_H
john_register_one(&fmt_multibit);
#else

#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "md5.h"
#include "escrypt/crypto_scrypt.h"
#include "jumbo.h"
#include "memdbg.h"
#include "unicode.h"

#define FORMAT_NAME             "MultiBit Wallet"
#define FORMAT_LABEL            "multibit"
#define FORMAT_TAG              "$multibit$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5/scrypt AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1001
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define PLAINTEXT_LENGTH        125
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      64 // just 4 is better for v2 salts

static struct fmt_tests multibit_tests[] = {
	// Wallets created by MultiBit Classic 0.5.18
	{"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"},
	{"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"},
	// MultiBit HD wallet 0.5.0
	{"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"},
	{NULL}
};

// Per-candidate plaintexts and per-candidate crack flags (set by crypt_all).
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;

// Parsed salt. Which fields are meaningful depends on 'type':
//   type 1: salt (first 8 bytes used) and block (32 bytes)
//   type 2: iv, block and block2 (16 bytes each)
static struct custom_salt {
	uint32_t type;
	unsigned char salt[16];
	unsigned char block[32];
	unsigned char iv[16];
	unsigned char block2[16];
} *cur_salt;

/*
 * Scale the key range for OpenMP and allocate the per-candidate buffers.
 * min_keys_per_crypt is scaled by the thread count only, max_keys_per_crypt
 * by thread count * OMP_SCALE.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	if (omp_t > 1) {
		self->params.min_keys_per_crypt *= omp_t;
		omp_t *= OMP_SCALE;
		self->params.max_keys_per_crypt *= omp_t;
	}
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked),
			self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}

// Release the buffers allocated in init().
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/*
 * Sanity-check one ciphertext line: tag, wallet type (1 or 2), and the
 * expected hex length of every field for that type. Returns 1 if usable.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "*")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 1 && value != 2)
		goto err;
	if (value == 1) {
		if ((p = strtokm(NULL, "*")) == NULL) // salt
			goto err;
		if (hexlenl(p, &extra) != 8 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted blocks
			goto err;
		if (hexlenl(p, &extra) != 32 * 2 || extra)
			goto err;
	} else if (value == 2) {
		if ((p = strtokm(NULL, "*")) == NULL) // iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with hardcoded iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
	}

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse an already-validated ciphertext into a static custom_salt.
 * Hex fields are decoded with the atoi16 nibble table; type selects
 * which fields get filled (see struct custom_salt).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	p = strtokm(NULL, "*");
	if (cs.type == 1) {
		for (i = 0; i < 8; i++)
			cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
	} else if (cs.type == 2) {
		for (i = 0; i < 16; i++)
			cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block2[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
	}
	MEM_FREE(keeptr);

	return &cs;
}

// Install the salt produced by get_salt() for the next crypt_all() batch.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// Store a candidate password (NUL-terminated, bounded copy).
static void multibit_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

// Return the stored candidate password.
static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * Heuristic plaintext check: does the decrypted block look like the start
 * of a bitcoinj protobuf (newest Bitcoin for Android backup)? Expects the
 * 0x0a tag, a one-byte varint length < 128, the "org." magic, and then
 * lower-case domain-name characters. Returns 1 on a match.
 */
static int is_bitcoinj_protobuf_data(unsigned char *block)
{
	unsigned char c;
	int i;

	// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)?
	if (block[0] == '\x0a' && block[1] < 128 &&
			!memcmp((const char*)block + 2, "org.", 4)) {
		// If it doesn't look like a lower alpha domain name of len >= 8
		// (e.g. 'bitcoin.'), fail (btcrecover); bytes 6..13 must be in
		// [a-z] or '.'.
		for (i = 6; i < 14; i++) {
			c = block[i];
			if ((c > 'z') || ((c < 'a') && ((c != '.'))))
				return 0;
		}
		return 1; // success
	}

	return 0;
}

/*
 * Approximate base58 check: accepts [1-9A-Za-z], which is a superset of
 * the real base58 alphabet ('I', 'O' and 'l' are not rejected here).
 * Good enough as a plaintext plausibility filter.
 */
static int is_base58(unsigned char *buffer, int length)
{
	unsigned char c;
	int i;

	for (i = 0; i < length; i++) {
		c = buffer[i];
		if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) ||
				((c > 'Z') && (c < 'a'))) {
			return 0;
		}
	}

	return 1; // success
}

// Fixed scrypt salt and fallback AES IV used for v2 (MultiBit HD) wallets.
static const unsigned char *salt_hardcoded =
	(unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5";
static const unsigned char *iv_hardcoded =
	(unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e";

/*
 * Try every candidate password against the current salt.
 *
 * Type 1 (MultiBit Classic): derive key/IV with chained MD5 over
 * password+salt, AES-256-CBC decrypt the first 16 bytes, then apply the
 * btcrecover plaintext heuristics (base58 private key, KnC backup header,
 * bitcoinj protobuf).
 *
 * Type 2 (MultiBit HD): scrypt(N=16384, r=8, p=1) over the UTF-16BE
 * password (including its terminating NUL, hence (len + 1) * 2 bytes) with
 * a hard-coded salt, then AES-256-CBC decrypt — first with the wallet's
 * own IV, and on failure with the hard-coded IV against block2.
 *
 * Sets cracked[index] = 1 for each hit; returns the candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char iv[16];
		unsigned char key[32];
		unsigned char outbuf[16];
		AES_KEY aes_decrypt_key;
		int len = strlen(saved_key[index]);

#ifdef _OPENMP
		if (cracked[index]) /* avoid false sharing of nearby elements */
#endif
			cracked[index] = 0;

		if (cur_salt->type == 1) {
			unsigned char c;
			MD5_CTX ctx;

			// key = MD5(password || salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key, &ctx);
			// key + 16 = MD5(key || password || salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, key, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key + 16, &ctx);
			// iv = MD5(key+16 || password || salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, key + 16, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(iv, &ctx);

			AES_set_decrypt_key(key, 256, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);

			c = outbuf[0];
			if (c == 'L' || c == 'K' || c == '5' || c == 'Q') {
				// Does it look like a base58 private key (MultiBit,
				// MultiDoge, or oldest-format Android key backup)?
				// (btcrecover)
				// check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z]
				if (is_base58(outbuf + 1, 15)) {
					// decrypt second block
					AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
					if (is_base58(outbuf, 16))
						cracked[index] = 1;
				}
			} else if (c == '#') {
				// Does it look like a KnC for Android key backup?
				if (memcmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough
					cracked[index] = 1;
			} else if (c == '\x0a') {
				// Does it look like a bitcoinj protobuf (newest Bitcoin
				// for Android backup)? (btcrecover)?
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		} else if (cur_salt->type == 2) {
			UTF16 password[PLAINTEXT_LENGTH * 2 + 1];

			// MultiBit HD hashes the UTF-16BE form of the password.
			len = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1);
			if (len < 0)
				len = strlen16(password);
			// 256-bit AES key from scrypt (N=16384, r=8, p=1).
			crypto_scrypt((const unsigned char*)password, (len + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32);

			// First attempt: the wallet's own IV against block.
			AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
			memcpy(iv, cur_salt->iv, 16);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			if (is_bitcoinj_protobuf_data(outbuf))
				cracked[index] = 1;
			else {
				// Second attempt: hard-coded IV against block2.
				AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
				memcpy(iv, iv_hardcoded, 16);
				AES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		}
	}

	return count;
}

// Did any candidate in this batch crack?
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

// Did this particular candidate crack?
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

// crypt_all() already verified the plaintext heuristically (FMT_NOT_EXACT).
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_multibit = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		multibit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		multibit_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
helpme_standalone.h
// // WARNING! This file is automatically generated from the sources in the src directory. // Do not modify this source code directly as any changes will be overwritten // // original file: src/helpme.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_HELPME_H_ #define _HELPME_STANDALONE_HELPME_H_ #if __cplusplus || DOXYGEN // C++ header #include <algorithm> #include <array> #include <cmath> #include <complex> #include <functional> #include <iostream> #include <list> #include <memory> #ifdef _OPENMP #include <omp.h> #endif #include <set> #include <stdexcept> #include <string> #include <tuple> #include <unistd.h> #include <vector> // original file: src/cartesiantransform.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_CARTESIANTRANSFORM_H_ #define _HELPME_STANDALONE_CARTESIANTRANSFORM_H_ // original file: src/matrix.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. 
Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_MATRIX_H_ #define _HELPME_STANDALONE_MATRIX_H_ #include <functional> #include <algorithm> #include <complex> #include <fstream> #include <functional> #include <initializer_list> #include <iostream> #include <iomanip> #include <numeric> #include <stdexcept> #include <tuple> // original file: src/lapack_wrapper.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE // // The code for Jacobi diagonalization is taken (with minimal modification) from // // http://www.mymathlib.com/c_source/matrices/eigen/jacobi_cyclic_method.c // #ifndef _HELPME_STANDALONE_LAPACK_WRAPPER_H_ #define _HELPME_STANDALONE_LAPACK_WRAPPER_H_ #include <cmath> #include <limits> namespace helpme { //////////////////////////////////////////////////////////////////////////////// // void Jacobi_Cyclic_Method // // (Real eigenvalues[], Real *eigenvectors, Real *A, int n) // // // // Description: // // Find the eigenvalues and eigenvectors of a symmetric n x n matrix A // // using the Jacobi method. Upon return, the input matrix A will have // // been modified. // // The Jacobi procedure for finding the eigenvalues and eigenvectors of a // // symmetric matrix A is based on finding a similarity transformation // // which diagonalizes A. The similarity transformation is given by a // // product of a sequence of orthogonal (rotation) matrices each of which // // annihilates an off-diagonal element and its transpose. The rotation // // effects only the rows and columns containing the off-diagonal element // // and its transpose, i.e. if a[i][j] is an off-diagonal element, then // // the orthogonal transformation rotates rows a[i][] and a[j][], and // // equivalently it rotates columns a[][i] and a[][j], so that a[i][j] = 0 // // and a[j][i] = 0. 
// // The cyclic Jacobi method considers the off-diagonal elements in the // // following order: (0,1),(0,2),...,(0,n-1),(1,2),...,(n-2,n-1). If the // // the magnitude of the off-diagonal element is greater than a treshold, // // then a rotation is performed to annihilate that off-diagnonal element. // // The process described above is called a sweep. After a sweep has been // // completed, the threshold is lowered and another sweep is performed // // with the new threshold. This process is completed until the final // // sweep is performed with the final threshold. // // The orthogonal transformation which annihilates the matrix element // // a[k][m], k != m, is Q = q[i][j], where q[i][j] = 0 if i != j, i,j != k // // i,j != m and q[i][j] = 1 if i = j, i,j != k, i,j != m, q[k][k] = // // q[m][m] = cos(phi), q[k][m] = -sin(phi), and q[m][k] = sin(phi), where // // the angle phi is determined by requiring a[k][m] -> 0. This condition // // on the angle phi is equivalent to // // cot(2 phi) = 0.5 * (a[k][k] - a[m][m]) / a[k][m] // // Since tan(2 phi) = 2 tan(phi) / (1 - tan(phi)^2), // // tan(phi)^2 + 2cot(2 phi) * tan(phi) - 1 = 0. // // Solving for tan(phi), choosing the solution with smallest magnitude, // // tan(phi) = - cot(2 phi) + sgn(cot(2 phi)) sqrt(cot(2phi)^2 + 1). // // Then cos(phi)^2 = 1 / (1 + tan(phi)^2) and sin(phi)^2 = 1 - cos(phi)^2 // // Finally by taking the sqrts and assigning the sign to the sin the same // // as that of the tan, the orthogonal transformation Q is determined. // // Let A" be the matrix obtained from the matrix A by applying the // // similarity transformation Q, since Q is orthogonal, A" = Q'AQ, where Q'// // is the transpose of Q (which is the same as the inverse of Q). Then // // a"[i][j] = Q'[i][p] a[p][q] Q[q][j] = Q[p][i] a[p][q] Q[q][j], // // where repeated indices are summed over. // // If i is not equal to either k or m, then Q[i][j] is the Kronecker // // delta. 
So if both i and j are not equal to either k or m, // // a"[i][j] = a[i][j]. // // If i = k, j = k, // // a"[k][k] = // // a[k][k]*cos(phi)^2 + a[k][m]*sin(2 phi) + a[m][m]*sin(phi)^2 // // If i = k, j = m, // // a"[k][m] = a"[m][k] = 0 = // // a[k][m]*cos(2 phi) + 0.5 * (a[m][m] - a[k][k])*sin(2 phi) // // If i = k, j != k or m, // // a"[k][j] = a"[j][k] = a[k][j] * cos(phi) + a[m][j] * sin(phi) // // If i = m, j = k, a"[m][k] = 0 // // If i = m, j = m, // // a"[m][m] = // // a[m][m]*cos(phi)^2 - a[k][m]*sin(2 phi) + a[k][k]*sin(phi)^2 // // If i= m, j != k or m, // // a"[m][j] = a"[j][m] = a[m][j] * cos(phi) - a[k][j] * sin(phi) // // // // If X is the matrix of normalized eigenvectors stored so that the ith // // column corresponds to the ith eigenvalue, then AX = X Lamda, where // // Lambda is the diagonal matrix with the ith eigenvalue stored at // // Lambda[i][i], i.e. X'AX = Lambda and X is orthogonal, the eigenvectors // // are normalized and orthogonal. So, X = Q1 Q2 ... Qs, where Qi is // // the ith orthogonal matrix, i.e. X can be recursively approximated by // // the recursion relation X" = X Q, where Q is the orthogonal matrix and // // the initial estimate for X is the identity matrix. // // If j = k, then x"[i][k] = x[i][k] * cos(phi) + x[i][m] * sin(phi), // // if j = m, then x"[i][m] = x[i][m] * cos(phi) - x[i][k] * sin(phi), and // // if j != k and j != m, then x"[i][j] = x[i][j]. // // // // Arguments: // // Real eigenvalues // // Array of dimension n, which upon return contains the eigenvalues of // // the matrix A. // // Real* eigenvectors // // Matrix of eigenvectors, the ith column of which contains an // // eigenvector corresponding to the ith eigenvalue in the array // // eigenvalues. // // Real* A // // Pointer to the first element of the symmetric n x n matrix A. The // // input matrix A is modified during the process. 
// // int n // // The dimension of the array eigenvalues, number of columns and rows // // of the matrices eigenvectors and A. // // // // Return Values: // // Function is of type void. // // // // Example: // // #define N // // Real A[N][N], Real eigenvalues[N], Real eigenvectors[N][N] // // // // (your code to initialize the matrix A ) // // // // JacobiCyclicDiagonalization(eigenvalues, (Real*)eigenvectors, // // (Real *) A, N); // //////////////////////////////////////////////////////////////////////////////// template <typename Real> void JacobiCyclicDiagonalization(Real *eigenvalues, Real *eigenvectors, const Real *A, int n) { int i, j, k, m; Real *pAk, *pAm, *p_r, *p_e; Real threshold_norm; Real threshold; Real tan_phi, sin_phi, cos_phi, tan2_phi, sin2_phi, cos2_phi; Real sin_2phi, cos_2phi, cot_2phi; Real dum1; Real dum2; Real dum3; Real max; // Take care of trivial cases if (n < 1) return; if (n == 1) { eigenvalues[0] = *A; *eigenvectors = 1; return; } // Initialize the eigenvalues to the identity matrix. for (p_e = eigenvectors, i = 0; i < n; i++) for (j = 0; j < n; p_e++, j++) if (i == j) *p_e = 1; else *p_e = 0; // Calculate the threshold and threshold_norm. for (threshold = 0, pAk = const_cast<Real *>(A), i = 0; i < (n - 1); pAk += n, i++) for (j = i + 1; j < n; j++) threshold += *(pAk + j) * *(pAk + j); threshold = sqrt(threshold + threshold); threshold_norm = threshold * std::numeric_limits<Real>::epsilon(); max = threshold + 1; while (threshold > threshold_norm) { threshold /= 10; if (max < threshold) continue; max = 0; for (pAk = const_cast<Real *>(A), k = 0; k < (n - 1); pAk += n, k++) { for (pAm = pAk + n, m = k + 1; m < n; pAm += n, m++) { if (std::abs(*(pAk + m)) < threshold) continue; // Calculate the sin and cos of the rotation angle which // annihilates A[k][m]. 
cot_2phi = 0.5f * (*(pAk + k) - *(pAm + m)) / *(pAk + m); dum1 = sqrt(cot_2phi * cot_2phi + 1); if (cot_2phi < 0) dum1 = -dum1; tan_phi = -cot_2phi + dum1; tan2_phi = tan_phi * tan_phi; sin2_phi = tan2_phi / (1 + tan2_phi); cos2_phi = 1 - sin2_phi; sin_phi = sqrt(sin2_phi); if (tan_phi < 0) sin_phi = -sin_phi; cos_phi = sqrt(cos2_phi); sin_2phi = 2 * sin_phi * cos_phi; cos_2phi = cos2_phi - sin2_phi; // Rotate columns k and m for both the matrix A // and the matrix of eigenvectors. p_r = const_cast<Real *>(A); dum1 = *(pAk + k); dum2 = *(pAm + m); dum3 = *(pAk + m); *(pAk + k) = dum1 * cos2_phi + dum2 * sin2_phi + dum3 * sin_2phi; *(pAm + m) = dum1 * sin2_phi + dum2 * cos2_phi - dum3 * sin_2phi; *(pAk + m) = 0; *(pAm + k) = 0; for (i = 0; i < n; p_r += n, i++) { if ((i == k) || (i == m)) continue; if (i < k) dum1 = *(p_r + k); else dum1 = *(pAk + i); if (i < m) dum2 = *(p_r + m); else dum2 = *(pAm + i); dum3 = dum1 * cos_phi + dum2 * sin_phi; if (i < k) *(p_r + k) = dum3; else *(pAk + i) = dum3; dum3 = -dum1 * sin_phi + dum2 * cos_phi; if (i < m) *(p_r + m) = dum3; else *(pAm + i) = dum3; } for (p_e = eigenvectors, i = 0; i < n; p_e += n, i++) { dum1 = *(p_e + k); dum2 = *(p_e + m); *(p_e + k) = dum1 * cos_phi + dum2 * sin_phi; *(p_e + m) = -dum1 * sin_phi + dum2 * cos_phi; } } for (i = 0; i < n; i++) if (i == k) continue; else if (max < std::abs(*(pAk + i))) max = std::abs(*(pAk + i)); } } for (pAk = const_cast<Real *>(A), k = 0; k < n; pAk += n, k++) eigenvalues[k] = *(pAk + k); } } // Namespace helpme #endif // Header guard // original file: src/string_utils.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. 
Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_STRING_UTIL_H_ #define _HELPME_STANDALONE_STRING_UTIL_H_ #include <complex> #include <iomanip> #include <iostream> #include <sstream> #include <string> namespace helpme { /*! * \brief makes a string representation of a floating point number. * \param width the width used to display the number. * \param precision the precision used to display the number. * \return the string representation of the floating point number. */ template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0> std::string formatNumber(const T &number, int width, int precision) { std::stringstream stream; stream.setf(std::ios::fixed, std::ios::floatfield); stream << std::setw(width) << std::setprecision(precision) << number; return stream.str(); } /*! * \brief makes a string representation of a complex number. * \param width the width used to display the real and the imaginary components. * \param precision the precision used to display the real and the imaginary components. * \return the string representation of the complex number. */ template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0> std::string formatNumber(const T &number, int width, int precision) { std::stringstream stream; stream.setf(std::ios::fixed, std::ios::floatfield); stream << "(" << std::setw(width) << std::setprecision(precision) << number.real() << ", " << std::setw(width) << std::setprecision(precision) << number.imag() << ")"; return stream.str(); } /*! * \brief makes a string representation of a multdimensional tensor, stored in a flat array. * \param data pointer to the start of the array holding the tensor information. * \param size the length of the array holding the tensor information. * \param rowDim the dimension of the fastest running index. * \param width the width of each individual floating point number. * \param precision used to display each floating point number. 
* \return the string representation of the tensor. */ template <typename T> std::string stringify(T *data, size_t size, size_t rowDim, int width = 14, int precision = 8) { std::stringstream stream; for (size_t ind = 0; ind < size; ++ind) { stream << formatNumber(data[ind], width, precision); if (ind % rowDim == rowDim - 1) stream << std::endl; else stream << " "; } return stream.str(); } } // Namespace helpme #endif // Header guard // original file: src/memory.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_MEMORY_H_ #define _HELPME_STANDALONE_MEMORY_H_ #include <stdexcept> #include <vector> #include <fftw3.h> namespace helpme { /*! * \brief FFTWAllocator a class to handle aligned allocation of memory using the FFTW libraries. * Code is adapted from http://www.josuttis.com/cppcode/myalloc.hpp.html. 
*/ template <class T> class FFTWAllocator { public: // type definitions typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; // rebind allocator to type U template <class U> struct rebind { typedef FFTWAllocator<U> other; }; // return address of values pointer address(reference value) const { return &value; } const_pointer address(const_reference value) const { return &value; } /* constructors and destructor * - nothing to do because the allocator has no state */ FFTWAllocator() throw() {} FFTWAllocator(const FFTWAllocator&) throw() {} template <class U> FFTWAllocator(const FFTWAllocator<U>&) throw() {} ~FFTWAllocator() throw() {} FFTWAllocator& operator=(FFTWAllocator other) throw() {} template <class U> FFTWAllocator& operator=(FFTWAllocator<U> other) throw() {} // return maximum number of elements that can be allocated size_type max_size() const throw() { return std::numeric_limits<std::size_t>::max() / sizeof(T); } // allocate but don't initialize num elements of type T pointer allocate(size_type num, const void* = 0) { return static_cast<pointer>(fftw_malloc(num * sizeof(T))); } // initialize elements of allocated storage p with value value void construct(pointer p, const T& value) { // initialize memory with placement new new ((void*)p) T(value); } // destroy elements of initialized storage p void destroy(pointer p) {} // deallocate storage p of deleted elements void deallocate(pointer p, size_type num) { fftw_free(static_cast<void*>(p)); } }; // return that all specializations of this allocator are interchangeable template <class T1, class T2> bool operator==(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() { return true; } template <class T1, class T2> bool operator!=(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() { return false; } template <typename Real> using vector = std::vector<Real, 
FFTWAllocator<Real>>; } // Namespace helpme #endif // Header guard namespace helpme { /*! * A helper function to transpose a dense matrix in place, gratuitously stolen from * https://stackoverflow.com/questions/9227747/in-place-transposition-of-a-matrix */ template <class RandomIterator> void transposeMemoryInPlace(RandomIterator first, RandomIterator last, int m) { const int mn1 = (last - first - 1); const int n = (last - first) / m; std::vector<bool> visited(last - first); RandomIterator cycle = first; while (++cycle != last) { if (visited[cycle - first]) continue; int a = cycle - first; do { a = a == mn1 ? mn1 : (n * a) % mn1; std::swap(*(first + a), *cycle); visited[a] = true; } while ((first + a) != cycle); } } /*! * \brief The Matrix class is designed to serve as a convenient wrapper to simplify 2D matrix operations. * It assumes dense matrices with contiguious data and the fast running index being the right * (column) index. The underlying memory may have already been allocated elsewhere by C, Fortran * or Python, and is directly manipulated in place, saving an expensive copy operation. To provide * read-only access to such memory address, use a const template type. */ template <typename Real> class Matrix { protected: /// The number of rows in the matrix. size_t nRows_; /// The number of columns in the matrix. size_t nCols_; /// A vector to conveniently allocate data, if we really need to. helpme::vector<Real> allocatedData_; /// Pointer to the raw data, whose allocation may not be controlled by us. 
Real* data_; public: enum class SortOrder { Ascending, Descending }; inline const Real& operator()(int row, int col) const { return *(data_ + row * nCols_ + col); } inline const Real& operator()(const std::pair<int, int>& indices) const { return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices)); } inline Real& operator()(int row, int col) { return *(data_ + row * nCols_ + col); } inline Real& operator()(const std::pair<int, int>& indices) { return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices)); } inline const Real* operator[](int row) const { return data_ + row * nCols_; } inline Real* operator[](int row) { return data_ + row * nCols_; } Real* begin() const { return data_; } Real* end() const { return data_ + nRows_ * nCols_; } const Real* cbegin() const { return data_; } const Real* cend() const { return data_ + nRows_ * nCols_; } /*! * \brief The sliceIterator struct provides a read-only view of a sub-block of a matrix, with arbitrary size. */ struct sliceIterator { Real *begin_, *end_, *ptr_; size_t stride_; sliceIterator(Real* start, Real* end, size_t stride) : begin_(start), end_(end), ptr_(start), stride_(stride) {} sliceIterator begin() const { return sliceIterator(begin_, end_, stride_); } sliceIterator end() const { return sliceIterator(end_, end_, 0); } sliceIterator cbegin() const { return sliceIterator(begin_, end_, stride_); } sliceIterator cend() const { return sliceIterator(end_, end_, 0); } bool operator!=(const sliceIterator& other) { return ptr_ != other.ptr_; } sliceIterator operator*=(Real val) { for (auto& element : *this) element *= val; return *this; } sliceIterator operator/=(Real val) { Real invVal = 1 / val; for (auto& element : *this) element *= invVal; return *this; } sliceIterator operator-=(Real val) { for (auto& element : *this) element -= val; return *this; } sliceIterator operator+=(Real val) { for (auto& element : *this) element += val; return *this; } sliceIterator operator++() { ptr_ += stride_; 
return *this; } const Real& operator[](size_t index) const { return *(begin_ + index); } size_t size() const { return std::distance(begin_, end_) / stride_; } void assertSameSize(const sliceIterator& other) const { if (size() != other.size()) throw std::runtime_error("Slice operations only supported for slices of the same size."); } void assertContiguous(const sliceIterator& iter) const { if (iter.stride_ != 1) throw std::runtime_error( "Slice operations called on operation that is only allowed for contiguous data."); } Matrix<Real> operator-(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); Matrix ret(1, size()); std::transform(begin_, end_, other.begin_, ret[0], [](const Real& a, const Real& b) -> Real { return a - b; }); return ret; } sliceIterator operator-=(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); std::transform(begin_, end_, other.begin_, begin_, [](const Real& a, const Real& b) -> Real { return a - b; }); return *this; } sliceIterator operator+=(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); std::transform(begin_, end_, other.begin_, begin_, [](const Real& a, const Real& b) -> Real { return a + b; }); return *this; } Real& operator*() { return *ptr_; } }; /*! * \brief row returns a read-only iterator over a given row. * \param r the row to return. * \return the slice in memory corresponding to the rth row. */ sliceIterator row(size_t r) const { return sliceIterator(data_ + r * nCols_, data_ + (r + 1) * nCols_, 1); } /*! * \brief col returns a read-only iterator over a given column. * \param c the column to return. * \return the slice in memory corresponding to the cth column. */ sliceIterator col(size_t c) const { return sliceIterator(data_ + c, data_ + nRows_ * nCols_ + c, nCols_); } /*! * \return the number of rows in this matrix. 
*/ size_t nRows() const { return nRows_; } /*! * \return the number of columns in this matrix. */ size_t nCols() const { return nCols_; } /*! * \brief Matrix Constructs an empty matrix. */ Matrix() : nRows_(0), nCols_(0) {} /*! * \brief Matrix Constructs a new matrix, allocating memory. * \param nRows the number of rows in the matrix. * \param nCols the number of columns in the matrix. */ Matrix(size_t nRows, size_t nCols) : nRows_(nRows), nCols_(nCols), allocatedData_(nRows * nCols, 0), data_(allocatedData_.data()) {} /*! * \brief Matrix Constructs a new matrix, allocating memory. * \param filename the ASCII file from which to read this matrix */ Matrix(const std::string& filename) { Real tmp; std::ifstream inFile(filename); if (!inFile) { std::string msg("Unable to open file "); msg += filename; throw std::runtime_error(msg); } inFile >> nRows_; inFile >> nCols_; while (inFile >> tmp) allocatedData_.push_back(tmp); inFile.close(); if (nRows_ * nCols_ != allocatedData_.size()) { allocatedData_.clear(); std::string msg("Inconsistent dimensions in "); msg += filename; msg += ". Amount of data inconsitent with declared size."; throw std::runtime_error(msg); } allocatedData_.shrink_to_fit(); data_ = allocatedData_.data(); } /*! * \brief Matrix Constructs a new matrix, allocating memory and initializing values using the braced initializer. * \param data a braced initializer list of braced initializer lists containing the values to be stored in the * matrix. */ Matrix(std::initializer_list<std::initializer_list<Real>> data) { nRows_ = data.size(); nCols_ = nRows_ ? data.begin()->size() : 0; allocatedData_.reserve(nRows_ * nCols_); for (auto& row : data) { if (row.size() != nCols_) throw std::runtime_error("Inconsistent row dimensions in matrix specification."); allocatedData_.insert(allocatedData_.end(), row.begin(), row.end()); } data_ = allocatedData_.data(); } /*! 
* \brief Matrix Constructs a new column vector, allocating memory and initializing values using the braced * initializer. \param data a braced initializer list of braced initializer lists containing the values to be stored * in the matrix. */ Matrix(std::initializer_list<Real> data) : allocatedData_(data), data_(allocatedData_.data()) { nRows_ = data.size(); nCols_ = 1; } /*! * \brief Matrix Constructs a new matrix using already allocated memory. * \param ptr the already-allocated memory underlying this matrix. * \param nRows the number of rows in the matrix. * \param nCols the number of columns in the matrix. */ Matrix(Real* ptr, size_t nRows, size_t nCols) : nRows_(nRows), nCols_(nCols), data_(ptr) {} /*! * \brief cast make a copy of this matrix, with its elements cast as a different type. * \tparam NewReal the type to cast each element to. * \return the copy of the matrix with the new type. */ template <typename NewReal> Matrix<NewReal> cast() const { Matrix<NewReal> newMat(nRows_, nCols_); NewReal* newPtr = newMat[0]; const Real* dataPtr = data_; for (size_t addr = 0; addr < nRows_ * nCols_; ++addr) *newPtr++ = static_cast<NewReal>(*dataPtr++); return newMat; } /*! * \brief setConstant sets all elements of this matrix to a specified value. * \param value the value to set each element to. */ void setConstant(Real value) { std::fill(begin(), end(), value); } /*! * \brief setZero sets each element of this matrix to zero. */ void setZero() { setConstant(0); } /*! * \brief isNearZero checks that each element in this matrix has an absolute value below some threshold. * \param threshold the value below which an element is considered zero. * \return whether all values are near zero or not. */ bool isNearZero(Real threshold = 1e-10f) const { return !std::any_of(cbegin(), cend(), [&](const Real& val) { return std::abs(val) > threshold; }); } /*! * \brief inverse inverts this matrix, leaving the original matrix untouched. * \return the inverse of this matrix. 
*/ Matrix inverse() const { assertSquare(); Matrix matrixInverse(nRows_, nRows_); if (nRows() == 3) { // 3x3 is a really common case, so treat it here as. Real determinant = data_[0] * (data_[4] * data_[8] - data_[7] * data_[5]) - data_[1] * (data_[3] * data_[8] - data_[5] * data_[6]) + data_[2] * (data_[3] * data_[7] - data_[4] * data_[6]); Real determinantInverse = 1 / determinant; matrixInverse.data_[0] = (data_[4] * data_[8] - data_[7] * data_[5]) * determinantInverse; matrixInverse.data_[1] = (data_[2] * data_[7] - data_[1] * data_[8]) * determinantInverse; matrixInverse.data_[2] = (data_[1] * data_[5] - data_[2] * data_[4]) * determinantInverse; matrixInverse.data_[3] = (data_[5] * data_[6] - data_[3] * data_[8]) * determinantInverse; matrixInverse.data_[4] = (data_[0] * data_[8] - data_[2] * data_[6]) * determinantInverse; matrixInverse.data_[5] = (data_[3] * data_[2] - data_[0] * data_[5]) * determinantInverse; matrixInverse.data_[6] = (data_[3] * data_[7] - data_[6] * data_[4]) * determinantInverse; matrixInverse.data_[7] = (data_[6] * data_[1] - data_[0] * data_[7]) * determinantInverse; matrixInverse.data_[8] = (data_[0] * data_[4] - data_[3] * data_[1]) * determinantInverse; } else { // Generic case; just use spectral decomposition, invert the eigenvalues, and stitch back together. // Note that this only works for symmetric matrices. Need to hook into Lapack for a general // inversion routine if this becomes a limitation. return this->applyOperation([](Real& element) { element = 1 / element; }); } return matrixInverse; } /*! * \brief assertSymmetric checks that this matrix is symmetric within some threshold. * \param threshold the value below which an pair's difference is considered zero. 
*/ void assertSymmetric(const Real& threshold = 1e-10f) const { assertSquare(); for (int row = 0; row < nRows_; ++row) { for (int col = 0; col < row; ++col) { if (std::abs(data_[row * nCols_ + col] - data_[col * nCols_ + row]) > threshold) throw std::runtime_error("Unexpected non-symmetric matrix found."); } } } /*! * \brief applyOperationToEachElement modifies every element in the matrix by applying an operation. * \param function a unary operator describing the operation to perform. */ void applyOperationToEachElement(const std::function<void(Real&)>& fxn) { std::for_each(begin(), end(), fxn); } /*! * \brief applyOperation applies an operation to this matrix using the spectral decomposition, * leaving the original untouched. Only for symmetric matrices, as coded. * \param function a undary operator describing the operation to perform. * \return the matrix transformed by the operator. */ Matrix applyOperation(const std::function<void(Real&)>& function) const { assertSymmetric(); auto eigenPairs = this->diagonalize(); Matrix evalsReal = std::get<0>(eigenPairs); Matrix evecs = std::get<1>(eigenPairs); evalsReal.applyOperationToEachElement(function); Matrix evecsT = evecs.transpose(); for (int row = 0; row < nRows_; ++row) { Real transformedEigenvalue = evalsReal[row][0]; std::for_each(evecsT.data_ + row * nCols_, evecsT.data_ + (row + 1) * nCols_, [&](Real& val) { val *= transformedEigenvalue; }); } return evecs * evecsT; } /*! * \brief assertSameSize make sure that this Matrix has the same dimensions as another Matrix. * \param other the matrix to compare to. */ void assertSameSize(const Matrix& other) const { if (nRows_ != other.nRows_ || nCols_ != other.nCols_) throw std::runtime_error("Attepting to compare matrices of different sizes!"); } /*! * \brief assertSquare make sure that this Matrix is square. */ void assertSquare() const { if (nRows_ != nCols_) throw std::runtime_error("Attepting to perform a square matrix operation on a non-square matrix!"); } /*! 
* \brief multiply this matrix with another, returning a new matrix containing the product. * \param other the right hand side of the matrix product. * \return the product of this matrix with the matrix other. */ Matrix multiply(const Matrix& other) const { // TODO one fine day this should be replaced by GEMM calls, if matrix multiplies actually get used much. if (nCols_ != other.nRows_) throw std::runtime_error("Attempting to multiply matrices with incompatible dimensions."); Matrix product(nRows_, other.nCols_); Real* output = product.data_; for (int row = 0; row < nRows_; ++row) { const Real* rowPtr = data_ + row * nCols_; for (int col = 0; col < other.nCols_; ++col) { for (int link = 0; link < nCols_; ++link) { *output += rowPtr[link] * other.data_[link * other.nCols_ + col]; } ++output; } } return product; } /*! * \brief operator * a convenient wrapper for the multiply function. * \param other the right hand side of the matrix product. * \return the product of this matrix with the matrix other. */ Matrix operator*(const Matrix& other) const { return this->multiply(other); } /*! * \brief operator * scale a copy of this matrix by a constant, leaving the orignal untouched. * \param scaleFac the scale factor to apply. * \return the scaled version of this matrix. */ Matrix operator*(Real scaleFac) const { auto scaled = this->clone(); scaled.applyOperationToEachElement([&](Real& element) { element *= scaleFac; }); return scaled; } /*! * \brief increment this matrix with another, returning a new matrix containing the sum. * \param other the right hand side of the matrix sum. * \return the sum of this matrix and the matrix other. */ Matrix incrementWith(const Matrix& other) { assertSameSize(other); std::transform(begin(), end(), other.begin(), begin(), [](const Real& a, const Real& b) -> Real { return a + b; }); return *this; } /*! * \brief a wrapper around the incrementWith() function. * \param other the right hand side of the matrix sum. 
     * \return the sum of this matrix and the matrix other.
     */
    Matrix operator+=(const Matrix& other) { return this->incrementWith(other); }

    /*!
     * \brief increment every element of this matrix by a constant, in place.
     * \param shift the constant added to every element.
     * \return the shifted matrix.
     */
    Matrix incrementWith(const Real& shift) {
        std::for_each(begin(), end(), [shift](Real& a) { a += shift; });
        return *this;
    }

    /*!
     * \brief a wrapper around the incrementWith() function.
     * \param shift the scalar to increment each value by
     * \return the sum of this matrix and the matrix other.
     */
    Matrix operator+=(const Real& shift) { return this->incrementWith(shift); }

    /*!
     * \brief almostEquals checks that two matrices have all elements the same, within some specified tolerance.
     * \param other the matrix against which we're comparing.
     * \param tolerance the amount that each element is allowed to deviate by.
     * \return whether the two matrices are almost equal.
     */
    template <typename T = Real, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
    bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const {
        // The floating point version
        assertSameSize(other);
        return std::equal(cbegin(), cend(), other.cbegin(), [&tolerance](Real a, Real b) -> bool {
            return (((a - b) < std::real(tolerance)) && ((a - b) > -std::real(tolerance)));
        });
    }

    template <typename T = Real, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
    bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const {
        // The complex version
        assertSameSize(other);
        auto tol = std::real(tolerance);
        // This is a little confusing, but the type "Real" is actually some kind of std::complex<...>,
        // so real and imaginary parts are compared separately against the real-valued tolerance.
        return std::equal(cbegin(), cend(), other.cbegin(), [&tol](Real a, Real b) -> bool {
            return (((a.real() - b.real()) < tol) && ((a.real() - b.real()) > -tol) &&
                    ((a.imag() - b.imag()) < tol) && ((a.imag() - b.imag()) > -tol));
        });
    }

    /*!
     * \brief dot computes the inner product of this matrix with another.
     * \param other the other matrix in the inner product, which must have the same dimensions.
     * \return the inner product.
     */
    Real dot(const Matrix& other) const {
        assertSameSize(other);
        return std::inner_product(cbegin(), cend(), other.cbegin(), Real(0));
    }

    /*!
     * \brief writeToFile formats the matrix and writes to an ASCII file.
     * \param filename the name of the file to save to.
     * \param width the width of each matrix element's formatted representation.
     * \param precision the precision of each matrix element's formatted representation.
     * \param printDimensions whether to print the dimensions at the top of the file.
     */
    void writeToFile(const std::string& filename, int width = 20, int precision = 14,
                     bool printDimensions = false) const {
        // NOTE(review): no check is made that the file opened successfully.
        std::ofstream file;
        file.open(filename, std::ios::out);
        if (printDimensions) file << nRows_ << " " << nCols_ << std::endl;
        file << stringify(data_, nRows_ * nCols_, nCols_, width, precision);
        file.close();
    }

    /*!
     * \brief write formatted matrix to a stream object.
     * \param os stream object to write to.
     * \return modified stream object.
     */
    std::ostream& write(std::ostream& os) const {
        for (int row = 0; row < nRows_; ++row) os << stringify(data_ + row * nCols_, nCols_, nCols_);
        os << std::endl;
        return os;
    }

    /*!
     * \brief transposeInPlace transposes this matrix in place.
     */
    void transposeInPlace() {
        transposeMemoryInPlace(begin(), end(), nCols_);
        std::swap(nCols_, nRows_);
    }

    /*!
     * \brief clone make a new copy of this matrix by deep copying the data.
     * \return the copy of this matrix.
     */
    Matrix clone() const {
        Matrix newMatrix = Matrix(nRows_, nCols_);
        std::copy(cbegin(), cend(), newMatrix.begin());
        return newMatrix;
    }

    /*!
     * \brief transpose this matrix, leaving the original untouched.
     * \return a transposed deep copy of this matrix.
     */
    Matrix transpose() const {
        Matrix copy = this->clone();
        copy.transposeInPlace();
        return copy;
    }

    /*!
     * \brief diagonalize diagonalize this matrix, leaving the original untouched.  Note that this assumes
     *        that this matrix is real and symmetric.
     * \param order how to order the (eigenvalue,eigenvector) pairs, where the sort key is the eigenvalue.
     * \return a pair of corresponding <eigenvalue , eigenvectors> sorted according to the order variable.
     *         The eigenvectors are stored by column.
     */
    std::pair<Matrix<Real>, Matrix<Real>> diagonalize(SortOrder order = SortOrder::Ascending) const {
        assertSymmetric();

        Matrix eigenValues(nRows_, 1);
        Matrix unsortedEigenVectors(nRows_, nRows_);
        Matrix sortedEigenVectors(nRows_, nRows_);
        JacobiCyclicDiagonalization<Real>(eigenValues[0], unsortedEigenVectors[0], cbegin(), nRows_);
        // Transpose so each eigenvector occupies a contiguous row, which makes sorting easy.
        unsortedEigenVectors.transposeInPlace();
        std::vector<std::pair<Real, const Real*>> eigenPairs;
        for (int val = 0; val < nRows_; ++val) eigenPairs.push_back({eigenValues[val][0], unsortedEigenVectors[val]});
        // Pairs sort lexicographically, i.e. on the eigenvalue first; reverse afterwards for descending order.
        std::sort(eigenPairs.begin(), eigenPairs.end());
        if (order == SortOrder::Descending) std::reverse(eigenPairs.begin(), eigenPairs.end());
        for (int val = 0; val < nRows_; ++val) {
            const auto& e = eigenPairs[val];
            eigenValues.data_[val] = std::get<0>(e);
            std::copy(std::get<1>(e), std::get<1>(e) + nCols_, sortedEigenVectors[val]);
        }
        // Transpose back so the returned eigenvectors are stored by column.
        sortedEigenVectors.transposeInPlace();
        return {std::move(eigenValues), std::move(sortedEigenVectors)};
    }
};

/*!
 * A helper function to allow printing of Matrix objects to a stream.
 */
template <typename Real>
std::ostream& operator<<(std::ostream& os, Matrix<Real> const& m) {
    return m.write(os);
}

}  // Namespace helpme

#endif  // Header guard

#include <vector>

namespace helpme {

// Maps the Cartesian component (lx, ly, lz) of a shell with angular momentum l = lx+ly+lz
// to its position within that shell's list of unique components.
static inline int cartesianAddress(int lx, int ly, int lz) {
    int l = lx + ly + lz;
    return lz * (2 * l - lz + 3) / 2 + ly;
}

/*!
 * \brief makeCartesianRotationMatrix builds a rotation matrix for unique Cartesian
 * components with a given angular momentum.  The algorithm used here is the simple
 * version (eq. 18) from D. M. Elking, J. Comp. Chem., 37 2067 (2016).  It's definitely
 * not the fastest way to do it, but will be revisited if profiling shows it to be an issue.
 * \param angularMomentum the angular momentum of the rotation matrix desired.
 * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old.
 * \return the rotation matrix
 */
template <typename Real>
Matrix<Real> makeCartesianRotationMatrix(int angularMomentum, const Matrix<Real> &transformer) {
    Real R00 = transformer[0][0];
    Real R01 = transformer[0][1];
    Real R02 = transformer[0][2];
    Real R10 = transformer[1][0];
    Real R11 = transformer[1][1];
    Real R12 = transformer[1][2];
    Real R20 = transformer[2][0];
    Real R21 = transformer[2][1];
    Real R22 = transformer[2][2];

    int nComponents = (angularMomentum + 1) * (angularMomentum + 2) / 2;
    // Tabulate factorials up to (2L)!.  NOTE(review): these are stored as int, which
    // overflows for angular momentum >= 7 (13! > 2^31) — confirm the supported L range.
    auto factorial = std::vector<int>(2 * angularMomentum + 1);
    factorial[0] = 1;
    for (int l = 1; l <= 2 * angularMomentum; ++l) factorial[l] = l * factorial[l - 1];

    Matrix<Real> R(nComponents, nComponents);
    // Loop over all output components (nx, ny, nz), and over all ways of distributing
    // each of nx, ny, nz among the three Cartesian directions; each distribution
    // contributes to the (mx, my, mz) component of the rotated quantity.
    for (int nz = 0; nz <= angularMomentum; ++nz) {
        for (int ny = 0; ny <= angularMomentum - nz; ++ny) {
            int nx = angularMomentum - ny - nz;
            for (int pz = 0; pz <= nx; ++pz) {
                for (int py = 0; py <= nx - pz; ++py) {
                    int px = nx - py - pz;
                    for (int qz = 0; qz <= ny; ++qz) {
                        for (int qy = 0; qy <= ny - qz; ++qy) {
                            int qx = ny - qy - qz;
                            for (int rz = 0; rz <= nz; ++rz) {
                                for (int ry = 0; ry <= nz - rz; ++ry) {
                                    int rx = nz - ry - rz;
                                    int mx = px + qx + rx;
                                    int my = py + qy + ry;
                                    int mz = pz + qz + rz;
                                    int m = mx + my + mz;
                                    if (m == angularMomentum) {
                                        // Multinomial prefactors; the integer divisions are exact because
                                        // mx = px+qx+rx, my = py+qy+ry, mz = pz+qz+rz.
                                        Real normx = factorial[mx] / (factorial[px] * factorial[qx] * factorial[rx]);
                                        Real normy = factorial[my] / (factorial[py] * factorial[qy] * factorial[ry]);
                                        Real normz = factorial[mz] / (factorial[pz] * factorial[qz] * factorial[rz]);
                                        Real Rx = std::pow(R00, px) * std::pow(R10, py) * std::pow(R20, pz);
                                        Real Ry = std::pow(R01, qx) * std::pow(R11, qy) * std::pow(R21, qz);
                                        Real Rz = std::pow(R02, rx) * std::pow(R12, ry) * std::pow(R22, rz);
                                        Real term = normx * normy * normz * Rx * Ry * Rz;
                                        R[cartesianAddress(mx, my, mz)][cartesianAddress(nx, ny, nz)] += term;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    return R;
}

/*!
 * \brief matrixVectorProduct A naive implementation of matrix-vector products, avoiding BLAS requirements (for now).
 * \param transformer the transformation matrix.
 * \param inputVector the vector to be transformed; must hold at least transformer.nRows() elements and
 *        must not alias outputVector.
 * \param outputVector the transformed vector.
 */
template <typename Real>
void matrixVectorProduct(const Matrix<Real> &transformer, const Real *inputVector, Real *outputVector) {
    int dimension = transformer.nRows();
    for (int row = 0; row < dimension; ++row) {
        outputVector[row] = std::inner_product(inputVector, inputVector + dimension, transformer[row], Real(0));
    }
}

/*!
 * \brief cartesianTransform transforms a list of a cartesian quantities to a different basis.
 * Assumes a list of quantities are to be transformed (in place) and all angular momentum
 * components up to and including the specified maximum are present in ascending A.M. order.
 * \param maxAngularMomentum the angular momentum of the incoming quantity.
 * \param transformOnlyThisShell if true, only the shell with angular momentum specified will be transformed
 * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old.
 * \param transformee the quantity to be transformed, stored as nAtoms X nComponents, with
 * components being the fast running index.
 * \return a transformed copy of transformee (the input itself is left untouched).
 */
template <typename Real>
Matrix<Real> cartesianTransform(int maxAngularMomentum, bool transformOnlyThisShell, const Matrix<Real> &transformer,
                                const Matrix<Real> &transformee) {
    Matrix<Real> transformed = transformee.clone();
    // When all shells are transformed, the single L=0 (scalar) component is skipped,
    // so the running offset starts at 1 and the loop starts at L=1.
    int offset = transformOnlyThisShell ? 0 : 1;
    int nAtoms = transformee.nRows();
    int firstShell = transformOnlyThisShell ? maxAngularMomentum : 1;
    for (int angularMomentum = firstShell; angularMomentum <= maxAngularMomentum; ++angularMomentum) {
        auto rotationMatrix = makeCartesianRotationMatrix(angularMomentum, transformer);
        for (int atom = 0; atom < nAtoms; ++atom) {
            const Real *inputData = transformee[atom];
            Real *outputData = transformed[atom];
            matrixVectorProduct(rotationMatrix, inputData + offset, outputData + offset);
        }
        offset += (angularMomentum + 1) * (angularMomentum + 2) / 2;
    }
    return transformed;
}

}  // Namespace helpme

#endif  // Header guard

// original file: src/fftw_wrapper.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_FFTW_WRAPPER_H_
#define _HELPME_STANDALONE_FFTW_WRAPPER_H_

#include <complex>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <type_traits>

#include <fftw3.h>

// #include "memory.h"

namespace helpme {

/*!
 * \brief The FFTWTypes class is a placeholder to lookup function names and types in FFTW parlance by template.
 */
template <typename Real>
struct FFTWTypes {
    // This is just a default implementation that does nothing - we just need to be able to instantiate something
    // in order to query the isImplemented member at runtime to check if the desired precision model was compiled in.
    struct EmptyPlan {
        int unused;
    };
    using Plan = void *;
    using Complex = std::complex<int>;
    static Plan makePlan4(size_t, void *, void *, int) { return 0; };
    static Plan makePlan5(size_t, void *, void *, int, int) { return 0; };
    static void cleanFFTW(){};
    static void execPlan1(Plan){};
    static void execPlan3(Plan, void *, void *){};
    static constexpr bool isImplemented = false;
    // Every operation points at a matching no-op stub so the wrapper class compiles for
    // precision models that were not linked in; isImplemented == false is checked at
    // runtime before any of these could be invoked.
    static constexpr decltype(&makePlan4) MakeRealToComplexPlan = &makePlan4;
    static constexpr decltype(&makePlan4) MakeComplexToRealPlan = &makePlan4;
    static constexpr decltype(&makePlan5) MakeComplexToComplexPlan = &makePlan5;
    static constexpr decltype(&execPlan3) ExecuteRealToComplexPlan = &execPlan3;
    static constexpr decltype(&execPlan3) ExecuteComplexToRealPlan = &execPlan3;
    static constexpr decltype(&execPlan3) ExecuteComplexToComplexPlan = &execPlan3;
    static constexpr decltype(&execPlan1) DestroyPlan = &execPlan1;
    static constexpr decltype(&cleanFFTW) CleanupFFTW = nullptr;
};

#if HAVE_FFTWF == 1
/// Lookup table mapping the generic plan/execute names onto the single precision FFTW API.
template <>
struct FFTWTypes<float> {
    using Plan = fftwf_plan;
    using Complex = fftwf_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftwf_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwf_plan_dft_r2c_1d;
    static constexpr decltype(&fftwf_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwf_plan_dft_c2r_1d;
    static constexpr decltype(&fftwf_plan_dft_1d) MakeComplexToComplexPlan = &fftwf_plan_dft_1d;
    static constexpr decltype(&fftwf_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwf_execute_dft_r2c;
    static constexpr decltype(&fftwf_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwf_execute_dft_c2r;
    static constexpr decltype(&fftwf_execute_dft) ExecuteComplexToComplexPlan = &fftwf_execute_dft;
    static constexpr decltype(&fftwf_destroy_plan) DestroyPlan = &fftwf_destroy_plan;
    static constexpr decltype(&fftwf_cleanup) CleanupFFTW = &fftwf_cleanup;
};
#endif  // HAVE_FFTWF

#if HAVE_FFTWD == 1
/// Lookup table mapping the generic plan/execute names onto the double precision FFTW API.
template <>
struct FFTWTypes<double> {
    using Plan = fftw_plan;
    using Complex = fftw_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftw_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftw_plan_dft_r2c_1d;
    static constexpr decltype(&fftw_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftw_plan_dft_c2r_1d;
    static constexpr decltype(&fftw_plan_dft_1d) MakeComplexToComplexPlan = &fftw_plan_dft_1d;
    static constexpr decltype(&fftw_execute_dft_r2c) ExecuteRealToComplexPlan = &fftw_execute_dft_r2c;
    static constexpr decltype(&fftw_execute_dft_c2r) ExecuteComplexToRealPlan = &fftw_execute_dft_c2r;
    static constexpr decltype(&fftw_execute_dft) ExecuteComplexToComplexPlan = &fftw_execute_dft;
    static constexpr decltype(&fftw_destroy_plan) DestroyPlan = &fftw_destroy_plan;
    static constexpr decltype(&fftw_cleanup) CleanupFFTW = &fftw_cleanup;
};
#endif  // HAVE_FFTWD

#if HAVE_FFTWL == 1
/// Lookup table mapping the generic plan/execute names onto the long double precision FFTW API.
template <>
struct FFTWTypes<long double> {
    using Plan = fftwl_plan;
    using Complex = fftwl_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftwl_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwl_plan_dft_r2c_1d;
    static constexpr decltype(&fftwl_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwl_plan_dft_c2r_1d;
    static constexpr decltype(&fftwl_plan_dft_1d) MakeComplexToComplexPlan = &fftwl_plan_dft_1d;
    static constexpr decltype(&fftwl_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwl_execute_dft_r2c;
    static constexpr decltype(&fftwl_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwl_execute_dft_c2r;
    static constexpr decltype(&fftwl_execute_dft) ExecuteComplexToComplexPlan = &fftwl_execute_dft;
    static constexpr decltype(&fftwl_destroy_plan) DestroyPlan = &fftwl_destroy_plan;
    static constexpr decltype(&fftwl_cleanup) CleanupFFTW = &fftwl_cleanup;
};
#endif  // HAVE_FFTWL

/*!
 * \brief The FFTWWrapper class is a convenient wrapper to abstract away the details of different
 * precision modes for FFTW, where the types and function names differ.
*/ template <typename Real> class FFTWWrapper { using typeinfo = FFTWTypes<Real>; using Plan = typename typeinfo::Plan; using Complex = typename typeinfo::Complex; protected: /// An FFTW plan object, describing out of place complex to complex forward transforms. typename typeinfo::Plan forwardPlan_ = nullptr; /// An FFTW plan object, describing out of place complex to complex inverse transforms. typename typeinfo::Plan inversePlan_ = nullptr; /// An FFTW plan object, describing in place complex to complex forward transforms. typename typeinfo::Plan forwardInPlacePlan_ = nullptr; /// An FFTW plan object, describing in place complex to complex inverse transforms. typename typeinfo::Plan inverseInPlacePlan_ = nullptr; /// An FFTW plan object, describing out of place real to complex forward transforms. typename typeinfo::Plan realToComplexPlan_ = nullptr; /// An FFTW plan object, describing out of place complex to real inverse transforms. typename typeinfo::Plan complexToRealPlan_ = nullptr; /// The size of the real data. size_t fftDimension_; /// The flags to be passed to the FFTW plan creator, to determine startup cost. unsigned transformFlags_; public: FFTWWrapper() {} FFTWWrapper(size_t fftDimension) : fftDimension_(fftDimension), transformFlags_(FFTW_ESTIMATE) { if (!typeinfo::isImplemented) { throw std::runtime_error( "Attempting to call FFTW using a precision mode that has not been linked. 
" "Make sure that -DHAVE_FFTWF=1, -DHAVE_FFTWD=1 or -DHAVE_FFTWL=1 is added to the compiler flags" "for single, double and long double precision support, respectively."); } helpme::vector<Real> realTemp(fftDimension_); helpme::vector<std::complex<Real>> complexTemp1(fftDimension_); helpme::vector<std::complex<Real>> complexTemp2(fftDimension_); Real *realPtr = realTemp.data(); Complex *complexPtr1 = reinterpret_cast<Complex *>(complexTemp1.data()); Complex *complexPtr2 = reinterpret_cast<Complex *>(complexTemp2.data()); forwardPlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_FORWARD, transformFlags_); inversePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_BACKWARD, transformFlags_); forwardInPlacePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_FORWARD, transformFlags_); inverseInPlacePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_BACKWARD, transformFlags_); realToComplexPlan_ = typeinfo::MakeRealToComplexPlan(fftDimension_, realPtr, complexPtr1, transformFlags_); complexToRealPlan_ = typeinfo::MakeComplexToRealPlan(fftDimension_, complexPtr1, realPtr, transformFlags_); } FFTWWrapper(const FFTWWrapper &other) = delete; FFTWWrapper(FFTWWrapper &&other) = delete; FFTWWrapper &operator=(const FFTWWrapper &other) = delete; FFTWWrapper &operator=(FFTWWrapper &&other) { std::swap(forwardPlan_, other.forwardPlan_); std::swap(forwardInPlacePlan_, other.forwardInPlacePlan_); std::swap(inversePlan_, other.inversePlan_); std::swap(inverseInPlacePlan_, other.inverseInPlacePlan_); std::swap(realToComplexPlan_, other.realToComplexPlan_); std::swap(complexToRealPlan_, other.complexToRealPlan_); std::swap(fftDimension_, other.fftDimension_); std::swap(transformFlags_, other.transformFlags_); return *this; } ~FFTWWrapper() { if (forwardPlan_) typeinfo::DestroyPlan(forwardPlan_); if (inversePlan_) 
typeinfo::DestroyPlan(inversePlan_); if (forwardInPlacePlan_) typeinfo::DestroyPlan(forwardInPlacePlan_); if (inverseInPlacePlan_) typeinfo::DestroyPlan(inverseInPlacePlan_); if (realToComplexPlan_) typeinfo::DestroyPlan(realToComplexPlan_); if (complexToRealPlan_) typeinfo::DestroyPlan(complexToRealPlan_); } /*! * \brief transform call FFTW to do an out of place complex to real FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. */ void transform(std::complex<Real> *inBuffer, Real *outBuffer) { typeinfo::ExecuteComplexToRealPlan(complexToRealPlan_, reinterpret_cast<Complex *>(inBuffer), outBuffer); } /*! * \brief transform call FFTW to do an out of place real to complex FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. */ void transform(Real *inBuffer, std::complex<Real> *outBuffer) { typeinfo::ExecuteRealToComplexPlan(realToComplexPlan_, inBuffer, reinterpret_cast<Complex *>(outBuffer)); } /*! * \brief transform call FFTW to do an in place complex to complex FFT. * \param inPlaceBuffer the location of the input and output data. * \param direction either FFTW_FORWARD or FFTW_BACKWARD. */ void transform(std::complex<Real> *inPlaceBuffer, int direction) { Complex *inPlacePtr = reinterpret_cast<Complex *>(inPlaceBuffer); switch (direction) { case FFTW_FORWARD: typeinfo::ExecuteComplexToComplexPlan(forwardInPlacePlan_, inPlacePtr, inPlacePtr); break; case FFTW_BACKWARD: typeinfo::ExecuteComplexToComplexPlan(inverseInPlacePlan_, inPlacePtr, inPlacePtr); break; default: throw std::runtime_error("Invalid FFTW transform passed to in place transform()."); } } /*! * \brief transform call FFTW to do an out of place complex to complex FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. * \param direction either FFTW_FORWARD or FFTW_BACKWARD. 
*/ void transform(std::complex<Real> *inBuffer, std::complex<Real> *outBuffer, int direction) { Complex *inPtr = reinterpret_cast<Complex *>(inBuffer); Complex *outPtr = reinterpret_cast<Complex *>(outBuffer); switch (direction) { case FFTW_FORWARD: typeinfo::ExecuteComplexToComplexPlan(forwardPlan_, inPtr, outPtr); break; case FFTW_BACKWARD: typeinfo::ExecuteComplexToComplexPlan(inversePlan_, inPtr, outPtr); break; default: throw std::runtime_error("Invalid FFTW transform passed to transform()."); } } }; } // Namespace helpme #endif // Header guard // original file: src/gamma.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_GAMMA_H_ #define _HELPME_STANDALONE_GAMMA_H_ #include <cmath> #include <limits> /*! * \file gamma.h * \brief Contains C++ implementations of templated gamma and incomplete gamma functions, computed using recursion. */ namespace helpme { #define HELPME_SQRTTWO std::sqrt(static_cast<Real>(2)) #define HELPME_SQRTPI static_cast<Real>(1.77245385090551602729816748334114518279754945612238712821381L) #define HELPME_PI static_cast<Real>(3.14159265358979323846264338327950288419716939937510582097494L) /*! * Compute upper incomplete gamma functions for positive half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$ */ template <typename Real, int twoS, bool isPositive> struct incompleteGammaRecursion { static Real compute(Real x) { return (0.5f * twoS - 1) * incompleteGammaRecursion<Real, twoS - 2, isPositive>::compute(x) + pow(x, (0.5f * twoS - 1)) * exp(-x); } }; /*! 
 * Compute upper incomplete gamma functions for negative half-integral s values using the recursion
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] -
 * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$
 */
template <typename Real, int twoS>
struct incompleteGammaRecursion<Real, twoS, false> {
    static Real compute(Real x) {
        return (incompleteGammaRecursion<Real, twoS + 2, false>::compute(x) - pow(x, 0.5f * twoS) * exp(-x)) /
               (0.5f * twoS);
    }
};

/// Specific value of incomplete gamma function: Gamma(1, x) = exp(-x).
template <typename Real>
struct incompleteGammaRecursion<Real, 2, true> {
    static Real compute(Real x) { return exp(-x); }
};

/// Specific value of incomplete gamma function: Gamma(1/2, x) = sqrt(pi) erfc(sqrt(x)).
template <typename Real>
struct incompleteGammaRecursion<Real, 1, false> {
    static Real compute(Real x) { return HELPME_SQRTPI * erfc(std::sqrt(x)); }
};

/// Specific value of incomplete gamma function: Gamma(1/2, x) = sqrt(pi) erfc(sqrt(x)).
template <typename Real>
struct incompleteGammaRecursion<Real, 1, true> {
    static Real compute(Real x) { return HELPME_SQRTPI * erfc(std::sqrt(x)); }
};

/// Specific value of incomplete gamma function.
template <typename Real>
struct incompleteGammaRecursion<Real, 0, false> {
    static Real compute(Real x) {
        // Gamma(0,x) is (minus) the exponential integral of -x.  This implementation was stolen from
        // http://www.mymathlib.com/c_source/functions/exponential_integrals/exponential_integral_Ei.c
        // Each of the three helpers below covers the region of the argument where it converges quickly.
        x = -x;
        if (x < -5.0L) return -(Real)Continued_Fraction_Ei(x);
        if (x == 0.0L) return std::numeric_limits<Real>::max();
        if (x < 6.8L) return -(Real)Power_Series_Ei(x);
        if (x < 50.0L) return -(Real)Argument_Addition_Series_Ei(x);
        return -(Real)Continued_Fraction_Ei(x);
    }

   private:
    // Convergence threshold for the series / continued-fraction iterations below.
    static constexpr long double epsilon = 10.0 * std::numeric_limits<long double>::epsilon();

    // For x < -5 or x > 50 the continued fraction representation of Ei converges fairly rapidly:
    //   Ei(x) = -exp(x) { 1/(-x+1-) 1/(-x+3-) 4/(-x+5-) 9/(-x+7-) ... }
    // Evaluated with the usual two-term (numerator/denominator) recurrences, rescaled to
    // avoid overflow when the denominator grows large.
    static long double Continued_Fraction_Ei(long double x) {
        long double Am1 = 1.0L;
        long double A0 = 0.0L;
        long double Bm1 = 0.0L;
        long double B0 = 1.0L;
        long double a = std::exp(x);
        long double b = -x + 1.0L;
        long double Ap1 = b * A0 + a * Am1;
        long double Bp1 = b * B0 + a * Bm1;
        int j = 1;
        a = 1.0L;
        while (std::fabs(Ap1 * B0 - A0 * Bp1) > epsilon * std::fabs(A0 * Bp1)) {
            if (std::fabs(Bp1) > 1.0L) {
                Am1 = A0 / Bp1;
                A0 = Ap1 / Bp1;
                Bm1 = B0 / Bp1;
                B0 = 1.0L;
            } else {
                Am1 = A0;
                A0 = Ap1;
                Bm1 = B0;
                B0 = Bp1;
            }
            a = -j * j;
            b += 2.0L;
            Ap1 = b * A0 + a * Am1;
            Bp1 = b * B0 + a * Bm1;
            j += 1;
        }
        return (-Ap1 / Bp1);
    }

    // For -5 < x < 6.8 the power series representation for (Ei(x) - gamma - ln|x|)/exp(x)
    // is used, where gamma is Euler's constant:
    //   (Ei(x) - gamma - ln|x|) / exp(x) = -Sum_{j>=1} (1 + 1/2 + ... + 1/j) (-x)^j / j!
    // Note that for x = 0.0, Ei is -inf; the caller handles that case before dispatching here.
    static long double Power_Series_Ei(long double x) {
        long double xn = -x;
        long double Sn = -x;
        long double Sm1 = 0.0L;
        long double hsum = 1.0L;
        long double g = 0.5772156649015328606065121L;
        long double y = 1.0L;
        long double factorial = 1.0L;
        while (std::fabs(Sn - Sm1) > epsilon * std::fabs(Sm1)) {
            Sm1 = Sn;
            y += 1.0L;
            xn *= (-x);
            factorial *= y;
            hsum += (1.0 / y);
            Sn += hsum * xn / factorial;
        }
        return (g + std::log(std::fabs(x)) - std::exp(x) * Sn);
    }

    // For 6.8 < x < 50.0 the argument addition series is used:
    //   Ei(x+dx) = Ei(x) + exp(x) Sum_{j>=0} j! [exp(j) expj(-dx) - 1] / x^(j+1),
    // where |x| > |dx| and expj(y) = Sum_{k=0..j} y^k / k!.  The table below holds
    // precomputed values of exp(-k) Ei(k) for integral k = 7, ..., 50.
    static long double Argument_Addition_Series_Ei(long double x) {
        static long double ei[] = {
            1.915047433355013959531e2L,  4.403798995348382689974e2L,  1.037878290717089587658e3L,
            2.492228976241877759138e3L,  6.071406374098611507965e3L,  1.495953266639752885229e4L,
            3.719768849068903560439e4L,  9.319251363396537129882e4L,  2.349558524907683035782e5L,
            5.955609986708370018502e5L,  1.516637894042516884433e6L,  3.877904330597443502996e6L,
            9.950907251046844760026e6L,  2.561565266405658882048e7L,  6.612718635548492136250e7L,
            1.711446713003636684975e8L,  4.439663698302712208698e8L,  1.154115391849182948287e9L,
            3.005950906525548689841e9L,  7.842940991898186370453e9L,  2.049649711988081236484e10L,
            5.364511859231469415605e10L, 1.405991957584069047340e11L, 3.689732094072741970640e11L,
            9.694555759683939661662e11L, 2.550043566357786926147e12L, 6.714640184076497558707e12L,
            1.769803724411626854310e13L, 4.669055014466159544500e13L, 1.232852079912097685431e14L,
            3.257988998672263996790e14L, 8.616388199965786544948e14L, 2.280446200301902595341e15L,
            6.039718263611241578359e15L, 1.600664914324504111070e16L, 4.244796092136850759368e16L,
            1.126348290166966760275e17L, 2.990444718632336675058e17L, 7.943916035704453771510e17L,
            2.111342388647824195000e18L, 5.614329680810343111535e18L, 1.493630213112993142255e19L,
            3.975442747903744836007e19L, 1.058563689713169096306e20L};
        // Split x into the nearest integer k and the remainder dx, then sum the series.
        int k = (int)(x + 0.5f);
        int j = 0;
        long double xx = (long double)k;
        long double dx = x - xx;
        long double xxj = xx;
        long double edx = std::exp(dx);
        long double Sm = 1.0L;
        long double Sn = (edx - 1.0L) / xxj;
        long double term = std::numeric_limits<double>::max();
        long double factorial = 1.0L;
        long double dxj = 1.0L;
        while (std::fabs(term) > epsilon * std::fabs(Sn)) {
            j++;
            factorial *= (long double)j;
            xxj *= xx;
            dxj *= (-dx);
            Sm += (dxj / factorial);
            term = (factorial * (edx * Sm - 1.0L)) / xxj;
            Sn += term;
        }
        return ei[k - 7] + Sn * std::exp(xx);
    }
};

/*!
* Compute gamma function for positive half-integral s values using the recursion. * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \Gamma[\frac{\mathrm{twoS}-2}{2}]\frac{\mathrm{twoS}-2}{2} \f$ */ template <typename Real, int twoS, bool isPositive> struct gammaRecursion { static constexpr Real value = gammaRecursion<Real, twoS - 2, isPositive>::value * (0.5f * twoS - 1); }; /*! * Compute gamma function for negative half-integral s values using the recursion. * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \frac{2\Gamma[\frac{\mathrm{twoS}_2}{2}]}{\mathrm{twoS}} \f$ * Returns infinity (expressed as the largest value representable by Real) for \f$twoS = 0, -2, -4, -6, \ldots\f$ . */ template <typename Real, int twoS> struct gammaRecursion<Real, twoS, false> { static constexpr Real value = gammaRecursion<Real, twoS + 2, false>::value == std::numeric_limits<Real>::max() ? std::numeric_limits<Real>::max() : gammaRecursion<Real, twoS + 2, false>::value / (0.5f * twoS); }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 0, false> { static constexpr Real value = std::numeric_limits<Real>::max(); }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 1, true> { static constexpr Real value = HELPME_SQRTPI; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 1, false> { static constexpr Real value = HELPME_SQRTPI; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 2, true> { static constexpr Real value = 1; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 2, false> { static constexpr Real value = 1; }; /*! * \class incompleteGammaComputer * \brief Computes the upper incomplete Gamma function. * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments. 
* \tparam Real the floating point type to use for arithmetic. * \tparam twoS twice the s value required. */ template <typename Real, int twoS> struct incompleteGammaComputer { /*! * \brief Computes the incomplete gamma function. * \param x value required. * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x^2]\f$. */ static Real compute(Real x) { return incompleteGammaRecursion<Real, twoS, (twoS > 0)>::compute(x); } }; /*! * Compute upper incomplete gamma functions for positive half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$ */ template <typename Real, int twoS, bool isPositive> struct incompleteVirialGammaRecursion { static std::pair<Real, Real> compute(Real x) { Real gamma = incompleteGammaComputer<Real, twoS>::compute(x); return {gamma, (0.5f * twoS) * gamma + pow(x, (0.5f * twoS)) * exp(-x)}; } }; /*! * Compute upper incomplete gamma functions for negative half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] - * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$ */ template <typename Real, int twoS> struct incompleteVirialGammaRecursion<Real, twoS, false> { static std::pair<Real, Real> compute(Real x) { Real gamma = incompleteGammaComputer<Real, twoS + 2>::compute(x); return {(gamma - pow(x, 0.5f * twoS) * exp(-x)) / (0.5f * twoS), gamma}; } }; /*! * \class incompleteGammaVirialComputer * \brief Computes the upper incomplete Gamma function for two different values: s and s+1. * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments. * \tparam Real the floating point type to use for arithmetic. * \tparam twoS twice the s value required. */ template <typename Real, int twoS> struct incompleteGammaVirialComputer { /*! 
     * \brief Computes the incomplete gamma function for argument twoS and twoS+2.
     * \param x value required.
     * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x]\f$ and \f$\Gamma[\frac{\mathrm{twoS+2}}{2}, x]\f$.
     */
    static std::pair<Real, Real> compute(Real x) {
        // twoS == 0 is deliberately routed to the "positive" recursion here
        // (note >= rather than >), avoiding the division by 0.5*twoS == 0 in
        // the negative-branch formula.
        return incompleteVirialGammaRecursion<Real, twoS, (twoS >= 0)>::compute(x);
    }
};

/*!
 * \class gammaComputer
 * \brief Computes the Gamma function.
 * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral values for the \f$s\f$ argument, so the input
 * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam twoS twice the s value required.
 */
template <typename Real, int twoS>
struct gammaComputer {
    /// The value of \f$\Gamma[\frac{\mathrm{twos}}{2}]\f$
    static constexpr Real value = gammaRecursion<Real, twoS, (twoS > 0)>::value;
};

/*!
 * \brief Computes the Gamma function using recursion instead of template metaprogramming.
 * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral values for the \f$s\f$ argument, so the input
 * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$.
 * \tparam Real the floating point type to use for arithmetic.
 * \param twoS twice the s value required.
 */
template <typename Real>
Real nonTemplateGammaComputer(int twoS) {
    if (twoS == 1) {
        return HELPME_SQRTPI;  // Gamma(1/2) = sqrt(pi)
    } else if (twoS == 2) {
        return 1;  // Gamma(1) = 1
    } else if (twoS <= 0 && twoS % 2 == 0) {
        // Poles of Gamma at s = 0, -1, -2, ...; flagged with the largest Real.
        return std::numeric_limits<Real>::max();
    } else if (twoS > 0) {
        // Upward recursion: Gamma(s) = (s - 1) * Gamma(s - 1).
        return nonTemplateGammaComputer<Real>(twoS - 2) * (0.5f * twoS - 1);
    } else {
        // Downward recursion: Gamma(s) = Gamma(s + 1) / s.
        return nonTemplateGammaComputer<Real>(twoS + 2) / (0.5f * twoS);
    }
}
}  // Namespace helpme
#endif  // Header guard

// original file: src/gridsize.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_GRIDSIZE_H_
#define _HELPME_STANDALONE_GRIDSIZE_H_

#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>

namespace helpme {

// N.B. The templates here are just to avoid multiple definitions in the .so file.

/*!
 * \brief allDivisors checks that a list of values are divisors of a given input value.
 * \param gridSize the gridSize to check for divisors.
 * \param requiredDivisors the list of divisors.
 * \return whether all listed values are divisors of gridSize.
 */
template <typename T>
bool allDivisors(T gridSize, const std::initializer_list<T> &requiredDivisors) {
    // A single non-zero remainder disqualifies the candidate size.
    for (const T &divisor : requiredDivisors)
        if (gridSize % divisor) return false;
    return true;
}

/*!
 * \brief findGridSize FFTW likes to have transformations with dimensions of the form
 *
 *  a  b  c  d  e  f
 * 2  3  5  7 11 13
 *
 * where a,b,c and d are general and e+f is either 0 or 1.  MKL has similar demands:
 *
 * https://software.intel.com/en-us/articles/fft-length-and-layout-advisor/
 * http://www.fftw.org/fftw3_doc/Real_002ddata-DFTs.html
 *
 * This routine will compute the next largest grid size subject to the constraint that the
 * resulting size is a multiple of a given factor.
 * \param inputSize the minimum size of the grid.
 * \param requiredDivisors list of values that must be a factor of the output grid size.
 * \return the adjusted grid size.
 */
template <typename T>
int findGridSize(T inputSize, const std::initializer_list<T> &requiredDivisors) {
    std::vector<int> primeFactors{2, 3, 5, 7};
    // NOTE(review): std::min over an empty initializer_list is undefined;
    // callers are assumed to always pass at least one required divisor.
    T minDivisor = std::min(requiredDivisors);
    // Smallest multiple of minDivisor that is >= inputSize.
    T currentSize = minDivisor * std::ceil(static_cast<float>(inputSize) / minDivisor);
    while (true) {
        // Now we know that the grid size is a multiple of requiredFactor, check
        // that it satisfies the prime factor requirements stated above.
        T remainder = currentSize;
        for (const int &factor : primeFactors)
            while (remainder > 1 && remainder % factor == 0) remainder /= factor;
        // A single leftover factor of 11 or 13 is also acceptable.
        if ((remainder == 1 || remainder == 11 || remainder == 13) && allDivisors(currentSize, requiredDivisors))
            return currentSize;
        currentSize += minDivisor;
    }
}

}  // Namespace helpme
#endif  // Header guard

// #include "matrix.h"
#if HAVE_MKL == 1
#include "mkl.h"
#endif
// #include "memory.h"

#if HAVE_MPI == 1
// original file: src/mpi_wrapper.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_MPI_WRAPPER_H_
#define _HELPME_STANDALONE_MPI_WRAPPER_H_

#include <mpi.h>

#include <complex>
#include <iomanip>
#include <iostream>
#include <stdexcept>

namespace helpme {

/*!
 * \brief The MPITypes struct abstracts away the MPI_Datatype types for different floating point modes
 *        using templates to hide the details from the caller.
 */
template <typename Real>
struct MPITypes {
    MPI_Datatype realType_;
    MPI_Datatype complexType_;
    // The unspecialized constructor rejects unsupported floating point types at runtime.
    MPITypes() {
        throw std::runtime_error("MPI wrapper has not been implemented for the requested floating point type.");
    }
};

// Map each supported floating point type onto the corresponding MPI datatypes.
template <>
MPITypes<float>::MPITypes() : realType_(MPI_FLOAT), complexType_(MPI_C_COMPLEX) {}
template <>
MPITypes<double>::MPITypes() : realType_(MPI_DOUBLE), complexType_(MPI_C_DOUBLE_COMPLEX) {}
template <>
MPITypes<long double>::MPITypes() : realType_(MPI_LONG_DOUBLE), complexType_(MPI_C_LONG_DOUBLE_COMPLEX) {}

/*!
 * \brief The MPIWrapper struct is a lightweight C++ wrapper around the C MPI functions. Its main
 *        purpose is to provide RAII semantics, ensuring that memory is correctly freed. It also
 *        conveniently abstracts away the different MPI type descriptors for each floating point type.
 */
template <typename Real>
struct MPIWrapper {
    MPITypes<Real> types_;
    /// The MPI communicator instance to use for all reciprocal space work.
    MPI_Comm mpiCommunicator_;
    /// The total number of MPI nodes involved in reciprocal space work.
    int numNodes_;
    /// The MPI rank of this node.
    int myRank_;
    /// The number of nodes in the X direction.
    int numNodesX_;
    /// The number of nodes in the Y direction.
    int numNodesY_;
    /// The number of nodes in the Z direction.
    int numNodesZ_;

    /// Throws unless the communicator size factors exactly into the requested X*Y*Z node grid.
    void assertNodePartitioningValid(int numNodes, int numNodesX, int numNodesY, int numNodesZ) const {
        if (numNodes != numNodesX * numNodesY * numNodesZ)
            throw std::runtime_error(
                "Communicator world size does not match the numNodesX, numNodesY, numNodesZ passed in.");
    }

    // NOTE(review): the default constructor leaves numNodesX_/Y_/Z_
    // uninitialized; it is only reached via split(), which does not set them
    // either — confirm no caller reads them on a split()-produced wrapper.
    MPIWrapper() : mpiCommunicator_(0), numNodes_(0), myRank_(0) {}
    /// Duplicates the given communicator (so this wrapper owns its own handle,
    /// released in the destructor) and caches its size and this process's rank.
    MPIWrapper(const MPI_Comm& communicator, int numNodesX, int numNodesY, int numNodesZ)
        : numNodesX_(numNodesX), numNodesY_(numNodesY), numNodesZ_(numNodesZ) {
        if (MPI_Comm_dup(communicator, &mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_dup in MPIWrapper constructor.");
        if (MPI_Comm_size(mpiCommunicator_, &numNodes_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper constructor.");
        if (MPI_Comm_rank(mpiCommunicator_, &myRank_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper constructor.");
        assertNodePartitioningValid(numNodes_, numNodesX, numNodesY, numNodesZ);
    }
    /// Releases the duplicated communicator (RAII); a default-constructed
    /// wrapper holds the sentinel 0 and frees nothing.
    ~MPIWrapper() {
        if (mpiCommunicator_) MPI_Comm_free(&mpiCommunicator_);
    }

    /*!
     * \brief barrier wait for all members of this communicator to reach this point.
     */
    void barrier() {
        if (MPI_Barrier(mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem in MPI Barrier call!");
    }

    /*!
     * \brief split split this communicator into subgroups.
     * \param color the number identifying the subgroup the new communicator belongs to.
     * \param key the rank of the new communicator within the subgroup.
     * \return the new communicator.
     */
    std::unique_ptr<MPIWrapper> split(int color, int key) {
        std::unique_ptr<MPIWrapper> newWrapper(new MPIWrapper);
        if (MPI_Comm_split(mpiCommunicator_, color, key, &newWrapper->mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_split in MPIWrapper split.");
        if (MPI_Comm_size(newWrapper->mpiCommunicator_, &newWrapper->numNodes_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper split.");
        if (MPI_Comm_rank(newWrapper->mpiCommunicator_, &newWrapper->myRank_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper split.");
        return newWrapper;
    }

    /*!
     * \brief allToAll perform alltoall communication within this communicator.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be communicated.
     */
    void allToAll(std::complex<Real>* inBuffer, std::complex<Real>* outBuffer, int dimension) {
        // Each complex element is shipped as two reals, hence the factor of 2.
        if (MPI_Alltoall(inBuffer, 2 * dimension, types_.realType_, outBuffer, 2 * dimension, types_.realType_,
                         mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI alltoall.");
    }

    /*!
     * \brief allToAll perform alltoall communication within this communicator.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be communicated.
     */
    void allToAll(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Alltoall(inBuffer, dimension, types_.realType_, outBuffer, dimension, types_.realType_,
                         mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI alltoall.");
    }

    /*!
     * \brief reduce performs a reduction, with summation as the operation.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be reduced.
     * \param node the node to reduce the result to (defaulted to zero).
     */
    void reduce(Real* inBuffer, Real* outBuffer, int dimension, int node = 0) {
        if (MPI_Reduce(inBuffer, outBuffer, dimension, types_.realType_, MPI_SUM, node, mpiCommunicator_) !=
            MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI reduce.");
    }

    /*!
     * \brief reduceScatterBlock performs a reduction, with summation as the operation, then scatters to all nodes.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be reduced on each node (currently must be the same on all nodes).
     */
    void reduceScatterBlock(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Reduce_scatter_block(inBuffer, outBuffer, dimension, types_.realType_, MPI_SUM, mpiCommunicator_) !=
            MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI reducescatter.");
    }

    /*!
     * \brief allGather broadcasts a chunk of data from each node to every other node.
     * \param inBuffer the buffer containing input data.
     * \param dimension the number of elements to be broadcast.
     * \param outBuffer the buffer to send results to.
     */
    void allGather(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Allgather(inBuffer, dimension, types_.realType_, outBuffer, dimension, types_.realType_,
                          mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI allgather.");
    }

    /*!
     * \brief operator << a convenience wrapper around ostream, to inject node info.
     */
    friend std::ostream& operator<<(std::ostream& os, const MPIWrapper& obj) {
        os << "Node " << obj.myRank_ << " of " << obj.numNodes_ << ":" << std::endl;
        return os;
    }
};

// Adapter to allow piping of streams into unique_ptr-held object
template <typename Real>
std::ostream& operator<<(std::ostream& os, const std::unique_ptr<MPIWrapper<Real>>& obj) {
    os << *obj;
    return os;
}

// A convenience macro to guarantee that each node prints in order.
#define PRINT(out) \ if (mpiCommunicator_) { \ for (int node = 0; node < mpiCommunicator_->numNodes_; ++node) { \ std::cout.setf(std::ios::fixed, std::ios::floatfield); \ if (node == mpiCommunicator_->myRank_) \ std::cout << mpiCommunicator_ << std::setw(18) << std::setprecision(10) << out << std::endl; \ mpiCommunicator_->barrier(); \ }; \ } else { \ std::cout << std::setw(18) << std::setprecision(10) << out << std::endl; \ } } // Namespace helpme #endif // Header guard #endif // original file: src/powers.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_POWERS_H_ #define _HELPME_STANDALONE_POWERS_H_ #include <cmath> /*! * \file powers.h * \brief Contains template functions to compute various quantities raised to an integer power. */ namespace helpme { template <typename Real, int n> struct raiseToIntegerPower { static Real pow(Real val) { return val * raiseToIntegerPower<Real, n - 1>::pow(val); } }; /// Base recursion for the power. 
template <typename Real> struct raiseToIntegerPower<Real, 0> { static Real pow(Real) { return 1; } }; /// n is positive and even case template <typename Real, int n, bool nIsPositive, bool nIsEven> struct normIntegerPowerComputer { static Real compute(Real val) { return raiseToIntegerPower<Real, n / 2>::pow(val); } }; /// n is positive and odd case template <typename Real, int n> struct normIntegerPowerComputer<Real, n, true, false> { static Real compute(Real val) { return raiseToIntegerPower<Real, n>::pow(std::sqrt(val)); } }; /// n is negative and even case template <typename Real, int n> struct normIntegerPowerComputer<Real, n, false, true> { static Real compute(Real val) { return raiseToIntegerPower<Real, -n / 2>::pow(1 / val); } }; /// n is negative and odd case template <typename Real, int n> struct normIntegerPowerComputer<Real, n, false, false> { static Real compute(Real val) { return raiseToIntegerPower<Real, -n>::pow(1 / sqrt(val)); } }; /*! * \brief Compute a quantity exponentiated by an integer power, using multiplication, * at compile time. The exponent is assumed to be positve. * \tparam Real the floating point type to use for arithmetic. * \tparam n the exponent to raise the value to. */ template <typename Real, int n> struct raiseNormToIntegerPower { /*! * \brief pow compute the norm raised to the power n. * \param val the square of the norm to be exponentiated. * \return the norm raised to the integer power. */ static Real compute(Real val) { return normIntegerPowerComputer<Real, n, (n >= 0), (n % 2 == 0)>::compute(val); } }; } // Namespace helpme #endif // Header guard // original file: src/splines.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_SPLINES_H_ #define _HELPME_STANDALONE_SPLINES_H_ // #include "matrix.h" /*! 
 * \file splines.h
 * \brief Contains the C++ implementation of a cardinal B-Splines.
 */

namespace helpme {

/*!
 * \class BSpline
 * \brief A class to compute cardinal B-splines.  This code can compute arbitrary-order B-splines of
 *        arbitrary derivative level, subject to the usual constraint that an order m spline is
 *        differentiable m-2 times.
 * \tparam Real the floating point type to use for arithmetic.
 */
template <typename Real>
class BSpline {
   protected:
    /// The order of this B-spline.
    short order_;
    /// The maximum derivative level for this B-spline.
    short derivativeLevel_;
    /// B-Splines with rows corresponding to derivative level, and columns to spline component.
    Matrix<Real> splines_;
    /// The grid point at which to start interpolation.
    short startingGridPoint_;

    /// Makes B-Spline array.  Raises the spline stored in array[0..n-2] to
    /// order n in place, via the cardinal B-spline order-raising recurrence
    /// evaluated at fractional offset `val`.
    inline void makeSplineInPlace(Real *array, const Real &val, const short &n) const {
        Real denom = (Real)1 / (n - 1);
        array[n - 1] = denom * val * array[n - 2];
        for (short j = 1; j < n - 1; ++j)
            array[n - j - 1] = denom * ((val + j) * array[n - j - 2] + (n - j - val) * array[n - j - 1]);
        array[0] *= denom * (1 - val);
    }

    /// Takes BSpline derivative.  Writes the derivative of `array` into
    /// `dArray` using the difference relation B'_j = B_{j-1} - B_j (with the
    /// obvious one-sided forms at the two ends).
    inline void differentiateSpline(const Real *array, Real *dArray, const short &n) const {
        dArray[0] = -array[0];
        for (short j = 1; j < n - 1; ++j) dArray[j] = array[j - 1] - array[j];
        dArray[n - 1] = array[n - 2];
    }

    /*!
     * \brief assertSplineIsSufficient ensures that the spline is large enough to be differentiable.
     *        An mth order B-Spline is differentiable m-2 times.
     */
    void assertSplineIsSufficient(int splineOrder, int derivativeLevel) const {
        if (splineOrder - derivativeLevel < 2) {
            std::string msg(
                "The spline order used is not sufficient for the derivative level requested."
                "Set the spline order to at least ");
            msg += std::to_string(derivativeLevel + 2);
            msg += " to run this calculation.";
            throw std::runtime_error(msg);
        }
    }

   public:
    /// The B-splines and their derivatives. See update() for argument details.
BSpline(short start, Real value, short order, short derivativeLevel) : splines_(derivativeLevel + 1, order) { update(start, value, order, derivativeLevel); } /*! * \brief update computes information for BSpline, without reallocating memory unless needed. * \param start the grid point at which to start interpolation. * \param value the distance (in fractional coordinates) from the starting grid point. * \param order the order of the BSpline. * \param derivativeLevel the maximum level of derivative needed for this BSpline. */ void update(short start, Real value, short order, short derivativeLevel) { assertSplineIsSufficient(order, derivativeLevel); startingGridPoint_ = start; order_ = order; derivativeLevel_ = derivativeLevel; // The +1 is to account for the fact that we need to store entries up to and including the max. if (splines_.nRows() < derivativeLevel + 1 || splines_.nCols() != order) splines_ = Matrix<Real>(derivativeLevel + 1, order); splines_.setZero(); splines_(0, 0) = 1 - value; splines_(0, 1) = value; for (short m = 1; m < order_ - 1; ++m) { makeSplineInPlace(splines_[0], value, m + 2); if (m >= order_ - derivativeLevel_ - 2) { short currentDerivative = order_ - m - 2; for (short l = 0; l < currentDerivative; ++l) differentiateSpline(splines_[l], splines_[l + 1], m + 2 + currentDerivative); } } } BSpline() {} /*! * \brief The modulus of the B-Spline in Fourier space. * \param gridDim the dimension of the grid in the dimension this spline is to be used. * \param mValues if provided, provides the ordering of the m values, if not they are * ordered as 0, 1, 2, ..., Kmax, -Kmax+1, -Kmax+2, ..., -2, -1. * \return a gridDim long vector containing the inverse of the Fourier space spline moduli. */ helpme::vector<Real> invSplineModuli(short gridDim, std::vector<int> mValues = {}) { int nKTerms = mValues.size() ? 
mValues.size() : gridDim; helpme::vector<Real> splineMods(nKTerms, 0); Real prefac = 2 * M_PI / gridDim; for (int m = 0; m < nKTerms; ++m) { Real real = 0; Real imag = 0; int mValue = mValues.size() ? mValues[m] : m; for (int n = 0; n < order_; ++n) { Real exparg = mValue * n * prefac; Real jSpline = splines_(0, n); real += jSpline * cos(exparg); imag += jSpline * sin(exparg); } splineMods[m] = real * real + imag * imag; } // Correct tiny values for conventional PME. if (!mValues.size()) { constexpr Real EPS = 1e-7f; if (splineMods[0] < EPS) splineMods[0] = splineMods[1] / 2; for (int i = 0; i < gridDim - 1; ++i) if (splineMods[i] < EPS) splineMods[i] = (splineMods[i - 1] + splineMods[i + 1]) / 2; if (splineMods[gridDim - 1] < EPS) splineMods[gridDim - 1] = splineMods[gridDim - 2] / 2; } // Invert, to avoid division later on. for (int i = 0; i < nKTerms; ++i) splineMods[i] = 1 / splineMods[i]; return splineMods; } /*! * \brief Gets the grid point to start interpolating from. * \return the index of the first grid point this spline supports. */ short startingGridPoint() const { return startingGridPoint_; } /*! * \brief Returns the B-Spline, or derivative thereof. * \param deriv the derivative level of the spline to be returned. */ const Real *operator[](const int &deriv) const { return splines_[deriv]; } /*! * \brief Get read-only access to the full spline data. * \returns a const reference to the full spline data: row index is derivative, col index is spline component. */ const Matrix<Real> &splineData() const { return splines_; } }; } // Namespace helpme #endif // Header guard // #include "string_utils.h" // original file: src/tensor_utils.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. 
Simmonett // // ENDLICENSE #ifndef _HELPME_STANDALONE_TENSOR_UTILS_H_ #define _HELPME_STANDALONE_TENSOR_UTILS_H_ #if HAVE_BLAS == 1 extern "C" { extern void dgemm_(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); extern void sgemm_(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); } #endif namespace helpme { /*! * \brief Sorts a 3D tensor stored contiguously as ABC into CBA order. * \param abcPtr the address of the incoming ABC ordered tensor. * \param aDimension the dimension of the A index. * \param bDimension the dimension of the B index. * \param cDimension the dimension of the C index. * \param cbaPtr the address of the outgoing CBA ordered tensor. * \param nThreads the number of parallel threads to use. */ template <typename Real> void permuteABCtoCBA(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension, Real *__restrict__ cbaPtr, size_t nThreads = 1) { #pragma omp parallel for num_threads(nThreads) for (int C = 0; C <= -1 + cDimension; ++C) for (int B = 0; B <= -1 + bDimension; ++B) for (int A = 0; A <= -1 + aDimension; ++A) cbaPtr[aDimension * bDimension * C + aDimension * B + A] = abcPtr[cDimension * bDimension * A + cDimension * B + C]; } /*! * \brief Sorts a 3D tensor stored contiguously as ABC into ACB order. * \param abcPtr the address of the incoming ABC ordered tensor. * \param aDimension the dimension of the A index. * \param bDimension the dimension of the B index. * \param cDimension the dimension of the C index. * \param acbPtr the address of the outgoing ACB ordered tensor. * \param nThreads the number of parallel threads to use. 
 */
template <typename Real>
void permuteABCtoACB(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension,
                     Real *__restrict__ acbPtr, size_t nThreads = 1) {
#pragma omp parallel for num_threads(nThreads)
    for (int A = 0; A <= -1 + aDimension; ++A)
        for (int C = 0; C <= -1 + cDimension; ++C)
            for (int B = 0; B <= -1 + bDimension; ++B)
                acbPtr[bDimension * cDimension * A + bDimension * C + B] =
                    abcPtr[cDimension * bDimension * A + cDimension * B + C];
}

/*!
 * \brief Contracts an ABxC tensor with a DxC tensor, to produce an ABxD quantity.
 * \param abcPtr the address of the incoming ABxC tensor.
 * \param dcPtr the address of the incoming DxC tensor.
 * \param abDimension the dimension of the AB index.
 * \param cDimension the dimension of the C index.
 * \param dDimension the dimension of the D index.
 * \param abdPtr the address of the outgoing ABD tensor.
 */
template <typename Real>
void contractABxCWithDxC(Real const *__restrict__ abcPtr, Real const *__restrict__ dcPtr, int const abDimension,
                         int const cDimension, int const dDimension, Real *__restrict__ abdPtr) {
    // Generic triple-loop contraction; used when no BLAS specialization
    // (below) applies for the Real type.
    Real acc_C;
    for (int AB = 0; AB <= -1 + abDimension; ++AB) {
        for (int D = 0; D <= -1 + dDimension; ++D) {
            acc_C = 0;
            for (int C = 0; C <= -1 + cDimension; ++C)
                acc_C = acc_C + abcPtr[cDimension * AB + C] * dcPtr[cDimension * D + C];
            abdPtr[dDimension * AB + D] = acc_C;
        }
    }
}

#if HAVE_BLAS == 1
// Single-precision specialization delegating to Fortran sgemm_; the transpose
// flags and swapped operand order express the row-major product through the
// column-major BLAS convention.
template <>
void contractABxCWithDxC<float>(float const *__restrict__ abcPtr, float const *__restrict__ dcPtr,
                                int const abDimension, int const cDimension, int const dDimension,
                                float *__restrict__ abdPtr) {
    // Degenerate extents: nothing to contract, and gemm must not be called.
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    float alpha = 1;
    float beta = 0;
    sgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<float *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<float *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr, const_cast<int *>(&dDimension));
}

// Double-precision specialization delegating to Fortran dgemm_.
template <>
void contractABxCWithDxC<double>(double const *__restrict__ abcPtr, double const *__restrict__ dcPtr,
                                 int const abDimension, int const cDimension, int const dDimension,
                                 double *__restrict__ abdPtr) {
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    double alpha = 1;
    double beta = 0;
    dgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<double *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<double *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr,
           const_cast<int *>(&dDimension));
}
#endif

}  // Namespace helpme
#endif  // Header guard

/*!
 * \file helpme.h
 * \brief Contains the C++ implementation of a PME Instance, and related helper classes.
 */

namespace helpme {

/*!
 * \brief nCartesian computes the total number of Cartesian components of a given angular momentum.
 * \param L the angular momentum.
 * \return total number of components up to and including angular momentum L.
 */
static int nCartesian(int L) { return (L + 1) * (L + 2) * (L + 3) / 6; }

/*!
 * \brief cartAddress computes the address of a term with given quantum numbers in a Cartesian buffer.
 * \param lx the x quantum number.
 * \param ly the y quantum number.
 * \param lz the z quantum number.
 * \return the address of an {lx, ly, lz} quantity in a buffer that contains all lower angular momentum terms too.
 */
static int cartAddress(int lx, int ly, int lz) {
    int l = lx + ly + lz;
    // Offset of the complete shells below total angular momentum l, plus the
    // position of {lx, ly, lz} within its own shell.
    return l * (l + 1) * (l + 2) / 6 + lz * (l * 2 - lz + 3) / 2 + ly;
}

// This is used to define function pointers in the constructor, and makes it easy to add new kernels.
#define ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(n) \ case n: \ convolveEVFxn_ = &convolveEVImpl<n>; \ convolveEVCompressedFxn_ = &convolveEVCompressedImpl<n>; \ cacheInfluenceFunctionFxn_ = &cacheInfluenceFunctionImpl<n>; \ slfEFxn_ = &slfEImpl<n>; \ dirEFxn_ = &dirEImpl<n>; \ adjEFxn_ = &adjEImpl<n>; \ dirEFFxn_ = &dirEFImpl<n>; \ adjEFFxn_ = &adjEFImpl<n>; \ break; /*! * \class splineCacheEntry * \brief A placeholder to encapsulate information about a given atom's splines */ template <typename Real> struct SplineCacheEntry { BSpline<Real> aSpline, bSpline, cSpline; int absoluteAtomNumber; SplineCacheEntry(int order, int derivativeLevel) : aSpline(0, 0, order, derivativeLevel), bSpline(0, 0, order, derivativeLevel), cSpline(0, 0, order, derivativeLevel), absoluteAtomNumber(-1) {} }; /*! * \class PMEInstance * \brief A class to encapsulate information related to a particle mesh Ewald calculation. * * By storing information related to a single PME calculation in this way, we allow multiple * instances to be created in calculations requiring multiple PMEs, e.g. for computing both * electrostatic and attractive dispersion terms using PME to handle long-range interactions. * \tparam Real the floating point type to use for arithmetic. */ template <typename Real, typename std::enable_if<std::is_floating_point<Real>::value, int>::type = 0> class PMEInstance { using GridIterator = std::vector<std::vector<std::pair<short, short>>>; using Complex = std::complex<Real>; using Spline = BSpline<Real>; using RealMat = Matrix<Real>; using RealVec = helpme::vector<Real>; public: /*! * \brief The algorithm being used to solve for the reciprocal space quantities. */ enum class AlgorithmType : int { Undefined = 0, PME = 1, CompressedPME = 2 }; /*! * \brief The different conventions for orienting a lattice constructed from input parameters. */ enum class LatticeType : int { Undefined = 0, XAligned = 1, ShapeMatrix = 2 }; /*! * \brief The different conventions for numbering nodes. 
*/ enum class NodeOrder : int { Undefined = 0, ZYX = 1 }; /*! * \brief The method used to converge induced dipoles */ enum class PolarizationType : int { Mutual = 0, Direct = 1 }; protected: /// The FFT grid dimensions in the {A,B,C} grid dimensions. int gridDimensionA_ = 0, gridDimensionB_ = 0, gridDimensionC_ = 0; /// The number of K vectors in the {A,B,C} dimensions. Equal to dim{A,B,C} for PME, lower for cPME. int numKSumTermsA_ = 0, numKSumTermsB_ = 0, numKSumTermsC_ = 0; /// The number of K vectors in the {A,B,C} dimensions to be handled by this node in a parallel setup. int myNumKSumTermsA_ = 0, myNumKSumTermsB_ = 0, myNumKSumTermsC_ = 0; /// The full A dimension after real->complex transformation. int complexGridDimensionA_ = 0; /// The locally owned A dimension after real->complex transformation. int myComplexGridDimensionA_ = 0; /// The order of the cardinal B-Spline used for interpolation. int splineOrder_ = 0; /// The actual number of threads per MPI instance, and the number requested previously. int nThreads_ = -1, requestedNumberOfThreads_ = -1; /// The exponent of the (inverse) interatomic distance used in this kernel. int rPower_ = 0; /// The scale factor to apply to all energies and derivatives. Real scaleFactor_ = 0; /// The attenuation parameter, whose units should be the inverse of those used to specify coordinates. Real kappa_ = 0; /// The lattice vectors. RealMat boxVecs_ = RealMat(3, 3); /// The reciprocal lattice vectors. RealMat recVecs_ = RealMat(3, 3); /// The scaled reciprocal lattice vectors, for transforming forces from scaled fractional coordinates. RealMat scaledRecVecs_ = RealMat(3, 3); /// A list of the number of splines handle by each thread on this node. std::vector<size_t> numAtomsPerThread_; /// An iterator over angular momentum components. std::vector<std::array<short, 3>> angMomIterator_; /// From a given starting point on the {A,B,C} edge of the grid, lists all points to be handled, correctly wrapping /// around the end. 
    GridIterator gridIteratorA_, gridIteratorB_, gridIteratorC_;
    /// The grid iterator for the C dimension, divided up by threads to avoid race conditions in parameter spreading.
    std::vector<GridIterator> threadedGridIteratorC_;
    /// The (inverse) bspline moduli to normalize the spreading / probing steps; these are folded into the convolution.
    RealVec splineModA_, splineModB_, splineModC_;
    /// The cached influence function involved in the convolution.
    RealVec cachedInfluenceFunction_;
    /// A function pointer to call the appropriate function to implement convolution with virial for conventional PME,
    /// templated to the rPower value.
    std::function<Real(bool, int, int, int, int, int, int, int, Real, Complex *, const RealMat &, Real, Real,
                       const Real *, const Real *, const Real *, const int *, const int *, const int *, RealMat &,
                       int)>
        convolveEVFxn_;
    /// A function pointer to call the appropriate function to implement convolution with virial for compressed PME,
    /// templated to the rPower value.
    std::function<Real(int, int, int, int, int, int, Real, const Real *, Real *, const RealMat &, Real, Real,
                       const Real *, const Real *, const Real *, const int *, const int *, const int *, RealMat &,
                       int)>
        convolveEVCompressedFxn_;
    /// A function pointer to call the appropriate function to implement caching of the influence function that appears
    /// in the convolution, templated to the rPower value.
    std::function<void(int, int, int, int, int, int, Real, RealVec &, const RealMat &, Real, Real, const Real *,
                       const Real *, const Real *, const int *, const int *, const int *, int)>
        cacheInfluenceFunctionFxn_;
    /// A function pointer to call the appropriate function to compute self energy, templated to the rPower value.
    std::function<Real(int, Real, Real)> slfEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy, templated to the rPower value.
    std::function<Real(Real, Real)> dirEFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy, templated to the rPower
    /// value.
    std::function<Real(Real, Real)> adjEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> dirEFFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> adjEFFxn_;
#if HAVE_MPI == 1
    /// The communicator object that handles interactions with MPI.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicator_;
    /// The communicator objects that handle interactions with MPI along this node's {A,B,C} pencils.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicatorA_, mpiCommunicatorB_, mpiCommunicatorC_;
#endif
    /// The number of nodes in the {A,B,C} dimensions.
    int numNodesA_ = 1, numNodesB_ = 1, numNodesC_ = 1;
    /// The rank of this node along the {A,B,C} dimensions.
    int myNodeRankA_ = 0, myNodeRankB_ = 0, myNodeRankC_ = 0;
    /// The first grid point that this node is responsible for in the {A,B,C} dimensions.
    int myFirstGridPointA_ = 0, myFirstGridPointB_ = 0, myFirstGridPointC_ = 0;
    /// The first K sum term that this node is responsible for.
    int firstKSumTermA_ = 0, firstKSumTermB_ = 0, firstKSumTermC_ = 0;
    /// The {X,Y,Z} dimensions of the locally owned chunk of the grid.
    int myGridDimensionA_ = 0, myGridDimensionB_ = 0, myGridDimensionC_ = 0;
    /// The subsets of a given dimension to be processed when doing a transform along another dimension.
    int subsetOfCAlongA_ = 0, subsetOfCAlongB_ = 0, subsetOfBAlongC_ = 0;
    /// The size of a cache line, in units of the size of the Real type, to allow better memory allocation policies.
    Real cacheLineSizeInReals_ = 0;
    /// The current unit cell parameters.
Real cellA_ = 0, cellB_ = 0, cellC_ = 0, cellAlpha_ = 0, cellBeta_ = 0, cellGamma_ = 0; /// Whether the unit cell parameters have been changed, invalidating cached gF quantities. bool unitCellHasChanged_ = true; /// Whether the kappa has been changed, invalidating kappa-dependent quantities. bool kappaHasChanged_ = true; /// Whether any of the grid dimensions have changed. bool gridDimensionHasChanged_ = true; /// Whether any of the reciprocal sum dimensions have changed. bool reciprocalSumDimensionHasChanged_ = true; /// Whether the algorithm to be used has changed. bool algorithmHasChanged_ = true; /// Whether the spline order has changed. bool splineOrderHasChanged_ = true; /// Whether the scale factor has changed. bool scaleFactorHasChanged_ = true; /// Whether the power of R has changed. bool rPowerHasChanged_ = true; /// Whether the parallel node setup has changed in any way. bool numNodesHasChanged_ = true; /// The algorithm being used to solve for reciprocal space quantities. AlgorithmType algorithmType_ = AlgorithmType::Undefined; /// The type of alignment scheme used for the lattice vectors. LatticeType latticeType_ = LatticeType::Undefined; /// Communication buffers for MPI parallelism. helpme::vector<Complex> workSpace1_, workSpace2_; /// FFTW wrappers to help with transformations in the {A,B,C} dimensions. FFTWWrapper<Real> fftHelperA_, fftHelperB_, fftHelperC_; /// The cached list of splines. std::vector<SplineCacheEntry<Real>> splineCache_; /// A scratch array for each threads to use as storage when probing the grid. RealMat fractionalPhis_; /// A list of the splines that each thread should handle. std::vector<std::list<size_t>> splinesPerThread_; /// The transformation matrices for the compressed PME algorithms, in the {A,B,C} dimensions. RealMat compressionCoefficientsA_, compressionCoefficientsB_, compressionCoefficientsC_; /// Iterators that define the reciprocal lattice sums over each index, correctly defining -1/2 <= m{A,B,C} < 1/2. 
    std::vector<int> mValsA_, mValsB_, mValsC_;
    /// A temporary list used in the assigning of atoms to threads and resorting by starting grid point.
    std::vector<std::set<std::pair<uint32_t, uint32_t>>> gridAtomList_;

    /*!
     * \brief makeGridIterator makes an iterator over the spline values that contribute to this node's grid
     *        in a given Cartesian dimension.  The iterator is of the form (grid point, spline index) and is
     *        sorted by increasing grid point, for cache efficiency.
     * \param dimension the dimension of the grid in the Cartesian dimension of interest.
     * \param first the first grid point in the Cartesian dimension to be handled by this node.
     * \param last the element past the last grid point in the Cartesian dimension to be handled by this node.
     * \param paddingSize the size of the "halo" region around this grid onto which the charge can be spread
     *        that really belongs to neighboring nodes.  For compressed PME we assume that each node handles
     *        only its own atoms and spreads onto an expanded grid to account for this.  In regular PME there
     *        is no padding because we assume that all halo atoms are present on this node before spreading.
     * \return the vector of spline iterators for each starting grid point.
     */
    GridIterator makeGridIterator(int dimension, int first, int last, int paddingSize) const {
        GridIterator gridIterator;
        if (paddingSize) {
            // This version assumes that every atom on this node is blindly placed on the
            // grid, requiring that a padding area of size splineOrder-1 be present.
            for (int gridStart = 0; gridStart < dimension; ++gridStart) {
                std::vector<std::pair<short, short>> splineIterator(splineOrder_);
                splineIterator.clear();
                if (gridStart >= first && gridStart < last - paddingSize) {
                    for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) {
                        // No wraparound here: the padding region absorbs spline tails that
                        // run past the owned range, so gridPoint increases monotonically
                        // and the iterator is already sorted.
                        int gridPoint = (splineIndex + gridStart);
                        splineIterator.push_back(std::make_pair(gridPoint - first, splineIndex));
                    }
                }
                splineIterator.shrink_to_fit();
                gridIterator.push_back(splineIterator);
            }
        } else {
            // This version assumes that each node has its own atoms, plus "halo" atoms
            // from neighboring grids that can contribute to this node's grid.
            for (int gridStart = 0; gridStart < dimension; ++gridStart) {
                std::vector<std::pair<short, short>> splineIterator(splineOrder_);
                splineIterator.clear();
                for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) {
                    // The modulo implements periodic wraparound of the spline support.
                    int gridPoint = (splineIndex + gridStart) % dimension;
                    if (gridPoint >= first && gridPoint < last)
                        splineIterator.push_back(std::make_pair(gridPoint - first, splineIndex));
                }
                splineIterator.shrink_to_fit();
                // Wraparound can put entries out of order, so sort by grid point here.
                std::sort(splineIterator.begin(), splineIterator.end());
                gridIterator.push_back(splineIterator);
            }
        }
        gridIterator.shrink_to_fit();
        return gridIterator;
    }

    /*! Make sure that the iterator over AM components is up to date.
     * \param parameterAngMom the angular momentum required for the iterator over multipole components.
     */
    void updateAngMomIterator(int parameterAngMom) {
        auto L = parameterAngMom;
        size_t expectedNTerms = nCartesian(L);
        // The iterator only ever grows; if it already covers this angular momentum there
        // is nothing to do, which makes repeated calls cheap.
        if (angMomIterator_.size() >= expectedNTerms) return;

        angMomIterator_.resize(expectedNTerms);
        for (int l = 0, count = 0; l <= L; ++l) {
            for (int lz = 0; lz <= l; ++lz) {
                for (int ly = 0; ly <= l - lz; ++ly) {
                    int lx = l - ly - lz;
                    angMomIterator_[count] = {{static_cast<short>(lx), static_cast<short>(ly), static_cast<short>(lz)}};
                    ++count;
                }
            }
        }
    }

    /*!
     * \brief updateInfluenceFunction builds the gF array cache, if the lattice vector has changed since the last
     *        build of it.  If the cell is unchanged, this does nothing.  This is handled
     *        separately from other initializations because we may skip the caching of
     *        the influence function when the virial is requested; we assume it's an NPT
     *        calculation in this case and therefore the influence function changes every time.
     */
    void updateInfluenceFunction() {
        // NOTE(review): the *HasChanged_ flags are presumably reset elsewhere (e.g. during
        // setup) after caches are rebuilt -- confirm against the rest of the class.
        if (unitCellHasChanged_ || kappaHasChanged_ || reciprocalSumDimensionHasChanged_ || splineOrderHasChanged_ ||
            scaleFactorHasChanged_ || numNodesHasChanged_ || algorithmHasChanged_) {
            cacheInfluenceFunctionFxn_(myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_, firstKSumTermA_,
                                       firstKSumTermB_, firstKSumTermC_, scaleFactor_, cachedInfluenceFunction_,
                                       recVecs_, cellVolume(), kappa_, &splineModA_[0], &splineModB_[0],
                                       &splineModC_[0], mValsA_.data(), mValsB_.data(), mValsC_.data(), nThreads_);
        }
    }

    /*!
     * \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives as
     *        well as the volume dependent part of the virial that comes from the structure factor.
     * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
     *        quadrupoles, etc.).  A negative value indicates that only the shell with |parameterAngMom| is to be
     *        considered, e.g. a value of -2 specifies that only quadrupoles (and not dipoles or charges) will be
     *        provided; the input matrix should have dimensions corresponding only to the number of terms in this
     *        shell.
     * \param parameters the list of parameters associated with each atom (charges, C6
     *        coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension
     *        nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     * i.e.
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param gridPoints the list of grid points at which the potential is needed; can be the same as the * coordinates. * \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is * (minus) the field, etc. A negative value indicates that only the derivative with order |parameterAngMom| * is to be generated, e.g. -2 specifies that only the second derivative (not the potential or its gradient) * will be returned as output. The output matrix should have space for only these terms, accordingly. * \param potential the array holding the potential. This is a matrix of dimensions * nAtoms x nD, where nD is the derivative level requested. See the details fo the parameters argument for * information about ordering of derivative components. N.B. this array is incremented with the potential, not * assigned, so take care to zero it first if only the current results are desired. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. */ void computePRecHelper(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, const RealMat &gridPoints, int derivativeLevel, RealMat &potential, RealMat &virial) { bool onlyOneShellForInput = parameterAngMom < 0; bool onlyOneShellForOutput = derivativeLevel < 0; parameterAngMom = std::abs(parameterAngMom); derivativeLevel = std::abs(derivativeLevel); int cartesianOffset = onlyOneShellForInput ? 
nCartesian(parameterAngMom - 1) : 0; sanityChecks(parameterAngMom, parameters, coordinates, cartesianOffset); updateAngMomIterator(std::max(parameterAngMom, derivativeLevel)); // Note: we're calling the version of spread parameters that computes its own splines here. // This is quite inefficient, but allow the potential to be computed at arbitrary locations by // simply regenerating splines on demand in the probing stage. If this becomes too slow, it's // easy to write some logic to check whether gridPoints and coordinates are the same, and // handle that special case using spline cacheing machinery for efficiency. Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data()); std::fill(workSpace1_.begin(), workSpace1_.end(), 0); updateAngMomIterator(parameterAngMom); auto fractionalParameters = cartesianTransform(parameterAngMom, onlyOneShellForInput, scaledRecVecs_.transpose(), parameters); int nComponents = nCartesian(parameterAngMom) - cartesianOffset; size_t nAtoms = coordinates.nRows(); for (size_t atom = 0; atom < nAtoms; ++atom) { // Blindly reconstruct splines for this atom, assuming nothing about the validity of the cache. // Note that this incurs a somewhat steep cost due to repeated memory allocations. 
auto bSplines = makeBSplines(coordinates[atom], parameterAngMom); const auto &splineA = std::get<0>(bSplines); const auto &splineB = std::get<1>(bSplines); const auto &splineC = std::get<2>(bSplines); const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()]; const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()]; const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()]; int numPointsA = static_cast<int>(aGridIterator.size()); int numPointsB = static_cast<int>(bGridIterator.size()); int numPointsC = static_cast<int>(cGridIterator.size()); const auto *iteratorDataA = aGridIterator.data(); const auto *iteratorDataB = bGridIterator.data(); const auto *iteratorDataC = cGridIterator.data(); for (int component = 0; component < nComponents; ++component) { const auto &quanta = angMomIterator_[component + cartesianOffset]; Real param = fractionalParameters(atom, component); const Real *splineValsA = splineA[quanta[0]]; const Real *splineValsB = splineB[quanta[1]]; const Real *splineValsC = splineC[quanta[2]]; for (int pointC = 0; pointC < numPointsC; ++pointC) { const auto &cPoint = iteratorDataC[pointC]; Real cValP = param * splineValsC[cPoint.second]; for (int pointB = 0; pointB < numPointsB; ++pointB) { const auto &bPoint = iteratorDataB[pointB]; Real cbValP = cValP * splineValsB[bPoint.second]; Real *cbRow = &realGrid[cPoint.first * myGridDimensionB_ * myGridDimensionA_ + bPoint.first * myGridDimensionA_]; for (int pointA = 0; pointA < numPointsA; ++pointA) { const auto &aPoint = iteratorDataA[pointA]; cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second]; } } } } } Real *potentialGrid; if (algorithmType_ == AlgorithmType::PME) { auto gridAddress = forwardTransform(realGrid); if (virial.nRows() == 0 && virial.nCols() == 0) { convolveE(gridAddress); } else { convolveEV(gridAddress, virial); } potentialGrid = inverseTransform(gridAddress); } else if (algorithmType_ == AlgorithmType::CompressedPME) { auto gridAddress = 
compressedForwardTransform(realGrid); if (virial.nRows() == 0 && virial.nCols() == 0) { convolveE(gridAddress); potentialGrid = compressedInverseTransform(gridAddress); } else { Real *convolvedGrid; convolveEV(gridAddress, convolvedGrid, virial); potentialGrid = compressedInverseTransform(convolvedGrid); } } else { std::logic_error("Unknown algorithm in helpme::computePRec"); } auto fracPotential = potential.clone(); fracPotential.setZero(); cartesianOffset = onlyOneShellForOutput ? nCartesian(derivativeLevel - 1) : 0; int nPotentialComponents = nCartesian(derivativeLevel) - cartesianOffset; size_t nPoints = gridPoints.nRows(); for (size_t point = 0; point < nPoints; ++point) { Real *phiPtr = fracPotential[point]; auto bSplines = makeBSplines(gridPoints[point], derivativeLevel); auto splineA = std::get<0>(bSplines); auto splineB = std::get<1>(bSplines); auto splineC = std::get<2>(bSplines); const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()]; const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()]; const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()]; const Real *splineStartA = splineA[0]; const Real *splineStartB = splineB[0]; const Real *splineStartC = splineC[0]; for (const auto &cPoint : cGridIterator) { for (const auto &bPoint : bGridIterator) { const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ + bPoint.first * myGridDimensionA_; for (const auto &aPoint : aGridIterator) { Real gridVal = cbRow[aPoint.first]; for (int component = 0; component < nPotentialComponents; ++component) { const auto &quanta = angMomIterator_[component + cartesianOffset]; const Real *splineValsA = splineStartA + quanta[0] * splineOrder_; const Real *splineValsB = splineStartB + quanta[1] * splineOrder_; const Real *splineValsC = splineStartC + quanta[2] * splineOrder_; phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] * splineValsC[cPoint.second]; } } } } } 
potential += cartesianTransform(derivativeLevel, onlyOneShellForOutput, scaledRecVecs_, fracPotential); } /*! * \brief Spreads parameters onto the grid for a single atom * \param atom the absolute atom number. * \param realGrid pointer to the array containing the grid in CBA order * \param nComponents the number of angular momentum components in the parameters. * \param nForceComponents the number of angular momentum components in the parameters with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param thread the ID of the thread handling this term. 
     */
    void spreadParametersImpl(const int &atom, Real *realGrid, const int &nComponents, const Spline &splineA,
                              const Spline &splineB, const Spline &splineC, const RealMat &parameters, int thread) {
        // The A and B iterators are shared; the C iterator is the per-thread slice so that
        // concurrent calls from different threads touch disjoint C planes of the grid.
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = threadedGridIteratorC_[thread][splineC.startingGridPoint()];
        // Unpack sizes and raw pointers once, outside of the hot loops.
        int numPointsA = static_cast<int>(aGridIterator.size());
        int numPointsB = static_cast<int>(bGridIterator.size());
        int numPointsC = static_cast<int>(cGridIterator.size());
        const auto *iteratorDataA = aGridIterator.data();
        const auto *iteratorDataB = bGridIterator.data();
        const auto *iteratorDataC = cGridIterator.data();
        for (int component = 0; component < nComponents; ++component) {
            // quanta = {lx, ly, lz} selects which spline derivative row to use per dimension.
            const auto &quanta = angMomIterator_[component];
            Real param = parameters(atom, component);
            const Real *splineValsA = splineA[quanta[0]];
            const Real *splineValsB = splineB[quanta[1]];
            const Real *splineValsC = splineC[quanta[2]];
            for (int pointC = 0; pointC < numPointsC; ++pointC) {
                const auto &cPoint = iteratorDataC[pointC];
                // Fold the parameter into the C spline weight early so the inner loops only
                // perform one multiply-add per grid point.
                Real cValP = param * splineValsC[cPoint.second];
                for (int pointB = 0; pointB < numPointsB; ++pointB) {
                    const auto &bPoint = iteratorDataB[pointB];
                    Real cbValP = cValP * splineValsB[bPoint.second];
                    // realGrid is laid out in CBA order: C is the slowest index, A the fastest.
                    Real *cbRow = realGrid + cPoint.first * myGridDimensionB_ * myGridDimensionA_ +
                                  bPoint.first * myGridDimensionA_;
                    for (int pointA = 0; pointA < numPointsA; ++pointA) {
                        const auto &aPoint = iteratorDataA[pointA];
                        cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second];
                    }
                }
            }
        }
    }

    /*!
     * \brief Probes the grid and computes the force for a single atom, specialized for zero parameter angular
     *        momentum.
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param parameter the list of parameter associated with the given atom.
     * \param forces a 3 vector of the forces for this atom, ordered in memory as {Fx, Fy, Fz}.
     */
    void probeGridImpl(const Real *potentialGrid, const Spline &splineA, const Spline &splineB, const Spline &splineC,
                       const Real &parameter, Real *forces) const {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        // We unpack the vector to raw pointers, as profiling shows that using range based for loops over vectors
        // causes a significant penalty in the innermost loop, primarily due to checking the loop stop condition.
        int numPointsA = static_cast<int>(aGridIterator.size());
        int numPointsB = static_cast<int>(bGridIterator.size());
        int numPointsC = static_cast<int>(cGridIterator.size());
        const auto *iteratorDataA = aGridIterator.data();
        const auto *iteratorDataB = bGridIterator.data();
        const auto *iteratorDataC = cGridIterator.data();
        // Row 0 of each spline table holds the values; the next row (offset by splineOrder_)
        // holds the first derivatives needed for the force.
        const Real *splineStartA0 = splineA[0];
        const Real *splineStartB0 = splineB[0];
        const Real *splineStartC0 = splineC[0];
        const Real *splineStartA1 = splineStartA0 + splineOrder_;
        const Real *splineStartB1 = splineStartB0 + splineOrder_;
        const Real *splineStartC1 = splineStartC0 + splineOrder_;
        // Ex/Ey/Ez accumulate the derivative of the potential along the three (fractional)
        // lattice directions; they are rotated to Cartesian components below.
        Real Ex = 0, Ey = 0, Ez = 0;
        for (int pointC = 0; pointC < numPointsC; ++pointC) {
            const auto &cPoint = iteratorDataC[pointC];
            const Real &splineC0 = splineStartC0[cPoint.second];
            const Real &splineC1 = splineStartC1[cPoint.second];
            for (int pointB = 0; pointB < numPointsB; ++pointB) {
                const auto &bPoint = iteratorDataB[pointB];
                const Real &splineB0 = splineStartB0[bPoint.second];
                const Real &splineB1 = splineStartB1[bPoint.second];
                const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ +
                                    bPoint.first * myGridDimensionA_;
                for (int pointA = 0; pointA < numPointsA; ++pointA) {
                    const auto &aPoint = iteratorDataA[pointA];
                    const Real &splineA0 = splineStartA0[aPoint.second];
                    const Real &splineA1 = splineStartA1[aPoint.second];
                    const Real &gridVal = cbRow[aPoint.first];
                    // Each component differentiates exactly one of the three spline factors.
                    Ey += gridVal * splineA0 * splineB1 * splineC0;
                    Ez += gridVal * splineA0 * splineB0 * splineC1;
                    Ex += gridVal * splineA1 * splineB0 * splineC0;
                }
            }
        }
        // Transform the fractional-coordinate derivatives to Cartesian forces; the force is
        // minus the parameter times the (transformed) gradient.
        forces[0] -= parameter * (scaledRecVecs_[0][0] * Ex + scaledRecVecs_[0][1] * Ey + scaledRecVecs_[0][2] * Ez);
        forces[1] -= parameter * (scaledRecVecs_[1][0] * Ex + scaledRecVecs_[1][1] * Ey + scaledRecVecs_[1][2] * Ez);
        forces[2] -= parameter * (scaledRecVecs_[2][0] * Ex + scaledRecVecs_[2][1] * Ey + scaledRecVecs_[2][2] * Ez);
    }

    /*!
     * \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum.
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param nPotentialComponents the number of components in the potential and its derivatives with one extra
     *        level of angular momentum to permit evaluation of forces.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param phiPtr a scratch array of length nPotentialComponents, to store the fractional potential.
     *        N.B. Make sure that updateAngMomIterator() has been called first with the appropriate derivative
     *        level for the requested potential derivatives.
*/ void probeGridImpl(const Real *potentialGrid, const int &nPotentialComponents, const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr) { const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()]; const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()]; const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()]; const Real *splineStartA = splineA[0]; const Real *splineStartB = splineB[0]; const Real *splineStartC = splineC[0]; for (const auto &cPoint : cGridIterator) { for (const auto &bPoint : bGridIterator) { const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ + bPoint.first * myGridDimensionA_; for (const auto &aPoint : aGridIterator) { Real gridVal = cbRow[aPoint.first]; for (int component = 0; component < nPotentialComponents; ++component) { const auto &quanta = angMomIterator_[component]; const Real *splineValsA = splineStartA + quanta[0] * splineOrder_; const Real *splineValsB = splineStartB + quanta[1] * splineOrder_; const Real *splineValsC = splineStartC + quanta[2] * splineOrder_; phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] * splineValsC[cPoint.second]; } } } } } /*! * \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum. * \param atom the absolute atom number. * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param nComponents the number of angular momentum components in the parameters. * \param nForceComponents the number of angular momentum components in the parameters with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param phiPtr a scratch array of length nForceComponents, to store the fractional potential. 
     * \param fracParameters the list of parameters associated with the current atom, in
     *        the scaled fraction coordinate basis (charges, C6 coefficients,
     *        multipoles, etc...). For a parameter with angular momentum L, a matrix
     *        of dimension nAtoms x nL is expected, where
     *        nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     *        i.e. generated by the python loops
     * \code{.py}
     * for L in range(maxAM+1):
     *     for Lz in range(0,L+1):
     *         for Ly in range(0, L - Lz + 1):
     *             Lx = L - Ly - Lz
     * \endcode
     * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
     */
    void probeGridImpl(const int &atom, const Real *potentialGrid, const int &nComponents, const int &nForceComponents,
                       const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr,
                       const Real *fracParameters, Real *forces) {
        // First gather the fractional potential and all derivatives up to one level above the
        // parameter angular momentum (needed to form forces) into the scratch array.
        std::fill(phiPtr, phiPtr + nForceComponents, 0);
        probeGridImpl(potentialGrid, nForceComponents, splineA, splineB, splineC, phiPtr);
        Real fracForce[3] = {0, 0, 0};
        for (int component = 0; component < nComponents; ++component) {
            Real param = fracParameters[component];
            const auto &quanta = angMomIterator_[component];
            short lx = quanta[0];
            short ly = quanta[1];
            short lz = quanta[2];
            // The force is minus the gradient: raising each quantum number by one selects the
            // corresponding derivative of the potential from the scratch buffer.
            fracForce[0] -= param * phiPtr[cartAddress(lx + 1, ly, lz)];
            fracForce[1] -= param * phiPtr[cartAddress(lx, ly + 1, lz)];
            fracForce[2] -= param * phiPtr[cartAddress(lx, ly, lz + 1)];
        }
        // Rotate the force from scaled fractional coordinates back to Cartesian.
        forces[0] += scaledRecVecs_[0][0] * fracForce[0] + scaledRecVecs_[0][1] * fracForce[1] +
                     scaledRecVecs_[0][2] * fracForce[2];
        forces[1] += scaledRecVecs_[1][0] * fracForce[0] + scaledRecVecs_[1][1] * fracForce[1] +
                     scaledRecVecs_[1][2] * fracForce[2];
        forces[2] += scaledRecVecs_[2][0] * fracForce[0] + scaledRecVecs_[2][1] * fracForce[1] +
                     scaledRecVecs_[2][2] * fracForce[2];
    }

    /*!
     * \brief assertInitialized makes sure that setup() has been called before running any calculations.
*/ void assertInitialized() const { if (!rPower_) throw std::runtime_error( "Either setup(...) or setup_parallel(...) must be called before computing anything."); } /*! * \brief makeBSplines construct the {x,y,z} B-Splines. * \param atomCoords a 3-vector containing the atom's coordinates. * \param derivativeLevel level of derivative needed for the splines. * \return a 3-tuple containing the {x,y,z} B-splines. */ std::tuple<Spline, Spline, Spline> makeBSplines(const Real *atomCoords, short derivativeLevel) const { // Subtract a tiny amount to make sure we're not exactly on the rightmost (excluded) // grid point. The calculation is translationally invariant, so this is valid. constexpr float EPS = 1e-6f; Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS; Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS; Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS; // Make sure the fractional coordinates fall in the range 0 <= s < 1 aCoord -= floor(aCoord); bCoord -= floor(bCoord); cCoord -= floor(cCoord); short aStartingGridPoint = gridDimensionA_ * aCoord; short bStartingGridPoint = gridDimensionB_ * bCoord; short cStartingGridPoint = gridDimensionC_ * cCoord; Real aDistanceFromGridPoint = gridDimensionA_ * aCoord - aStartingGridPoint; Real bDistanceFromGridPoint = gridDimensionB_ * bCoord - bStartingGridPoint; Real cDistanceFromGridPoint = gridDimensionC_ * cCoord - cStartingGridPoint; return std::make_tuple(Spline(aStartingGridPoint, aDistanceFromGridPoint, splineOrder_, derivativeLevel), Spline(bStartingGridPoint, bDistanceFromGridPoint, splineOrder_, derivativeLevel), Spline(cStartingGridPoint, cDistanceFromGridPoint, splineOrder_, derivativeLevel)); } /*! * \brief convolveEVImpl performs the reciprocal space convolution, returning the energy, for conventional PME. 
     * We opt to not cache this the same way as the non-virial version because it's safe to assume that if
     * the virial is requested the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param useConjugateSymmetry whether to use the complex conjugate symmetry in the convolution or not.
     * \param fullNx full (complex) dimension of the reciprocal sum in the X direction.
     * \param myNx the subset of the reciprocal sum in the x direction to be handled by this node.
     * \param myNy the subset of the reciprocal sum in the y direction to be handled by this node.
     * \param myNz the subset of the reciprocal sum in the z direction to be handled by this node.
     * \param startX the starting reciprocal sum term handled by this node in the X direction.
     * \param startY the starting reciprocal sum term handled by this node in the Y direction.
     * \param startZ the starting reciprocal sum term handled by this node in the Z direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
     *        (e.g. the 1 / [4 pi epslion0] for Coulomb calculations).
     * \param gridPtr the Fourier space grid, with ordering YXZ.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVImpl(bool useConjugateSymmetry, int fullNx, int myNx, int myNy, int myNz, int startX,
                               int startY, int startZ, Real scaleFactor, Complex *gridPtr, const RealMat &boxInv,
                               Real volume, Real kappa, const Real *xMods, const Real *yMods, const Real *zMods,
                               const int *xMVals, const int *yMVals, const int *zMVals, RealMat &virial,
                               int nThreads) {
        Real energy = 0;

        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * HELPME_PI * HELPME_SQRTPI * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * (gridPtr[0].real() * gridPtr[0].real() + gridPtr[0].imag() * gridPtr[0].imag());
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
        if (nodeZero) gridPtr[0] = Complex(0, 0);

        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        size_t nxz = (size_t)myNx * myNz;
        Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0;
        const Real *boxPtr = boxInv[0];
        size_t nyxz = myNy * nxz;
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into its (ky, kx, kz) components.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            // We only loop over the first nx/2+1 x values in the complex case;
            // this accounts for the "missing" complex conjugate values.
            Real permPrefac = (useConjugateSymmetry && (kx + startX != 0) && (kx + startX != fullNx - 1)) ? 2 : 1;
            const int &mx = xMVals[kx];
            const int &my = yMVals[ky];
            const int &mz = zMVals[kz];
            // Cartesian components of the reciprocal lattice vector m.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            // Energy and virial gamma factors for this reciprocal vector.
            auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared);
            Real eGamma = std::get<0>(gammas);
            Real vGamma = std::get<1>(gammas);
            Complex &gridVal = gridPtr[yxz];
            Real structFacNorm = gridVal.real() * gridVal.real() + gridVal.imag() * gridVal.imag();
            Real totalPrefac = volPrefac * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
            Real influenceFunction = totalPrefac * eGamma;
            // Convolve in place: the grid now holds the product to be back-transformed.
            gridVal *= influenceFunction;
            Real eTerm = permPrefac * influenceFunction * structFacNorm;
            Real vTerm = permPrefac * vGamma * totalPrefac / mNormSq * structFacNorm;
            energy += eTerm;
            Vxx += vTerm * mVecX * mVecX;
            Vxy += vTerm * mVecX * mVecY;
            Vyy += vTerm * mVecY * mVecY;
            Vxz += vTerm * mVecX * mVecZ;
            Vyz += vTerm * mVecY * mVecZ;
            Vzz += vTerm * mVecZ * mVecZ;
        }

        energy /= 2;

        virial[0][0] -= Vxx - energy;
        virial[0][1] -= Vxy;
        virial[0][2] -= Vyy - energy;
        virial[0][3] -= Vxz;
        virial[0][4] -= Vyz;
        virial[0][5] -= Vzz - energy;

        return energy;
    }

    /*!
     * \brief convolveEVCompressedImpl performs the reciprocal space convolution, returning the energy, for compressed
     * PME.
     * We opt to not cache this the same way as the non-virial version because it's safe to assume that if the
     * virial is requested the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param myNx the subset of the reciprocal sum in the x direction to be handled by this node.
     * \param myNy the subset of the reciprocal sum in the y direction to be handled by this node.
     * \param myNz the subset of the reciprocal sum in the z direction to be handled by this node.
     * \param startX the starting reciprocal sum term handled by this node in the X direction.
     * \param startY the starting reciprocal sum term handled by this node in the Y direction.
     * \param startZ the starting reciprocal sum term handled by this node in the Z direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
     *        (e.g. thee 1 / [4 pi epslion0] for Coulomb calculations).
     * \param gridPtrIn the Fourier space grid, with ordering YXZ.
     * \param gridPtrOut the convolved Fourier space grid, with ordering YXZ.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVCompressedImpl(int myNx, int myNy, int myNz, int startX, int startY, int startZ,
                                         Real scaleFactor, const Real *__restrict__ gridPtrIn,
                                         Real *__restrict__ gridPtrOut, const RealMat &boxInv, Real volume,
                                         Real kappa, const Real *xMods, const Real *yMods, const Real *zMods,
                                         const int *xMVals, const int *yMVals, const int *zMVals, RealMat &virial,
                                         int nThreads) {
        Real energy = 0;

        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * HELPME_PI * HELPME_SQRTPI * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * gridPtrIn[0] * gridPtrIn[0];
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
        if (nodeZero) gridPtrOut[0] = 0;

        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        size_t nxz = (size_t)myNx * myNz;
        size_t nyxz = myNy * nxz;
        Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0;
        const Real *boxPtr = boxInv[0];
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into its (ky, kx, kz) components.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            const int &mx = xMVals[kx];
            const int &my = yMVals[ky];
            const int &mz = zMVals[kz];
            // Cartesian components of the reciprocal lattice vector m.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared);
            Real eGamma = std::get<0>(gammas);
            Real vGamma = std::get<1>(gammas);
            const Real &gridVal = gridPtrIn[yxz];
            // Compressed PME interleaves the m values as 0, 1, -1, 2, -2, ..., so the -m
            // partner of a nonzero m sits in the adjacent slot; m=0 is its own partner.
            size_t minusKx = (mx == 0 ? 0 : (mx < 0 ? kx - 1 : kx + 1));
            size_t minusKy = (my == 0 ? 0 : (my < 0 ? ky - 1 : ky + 1));
            size_t minusKz = (mz == 0 ? 0 : (mz < 0 ? kz - 1 : kz + 1));
            size_t addressXY = minusKy * nxz + minusKx * myNz + kz;
            size_t addressXZ = ky * nxz + minusKx * myNz + minusKz;
            size_t addressYZ = minusKy * nxz + (size_t)kx * myNz + minusKz;
            Real totalPrefac = volPrefac * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
            Real influenceFunction = totalPrefac * eGamma;
            gridPtrOut[yxz] = gridVal * influenceFunction;
            Real eTerm = influenceFunction * gridVal * gridVal;
            Real vPrefac = vGamma * totalPrefac / mNormSq * gridVal;
            Real vTerm = vPrefac * gridVal;
            // Off-diagonal virial terms couple this grid value with its -m partner values.
            Real vTermXY = vPrefac * gridPtrIn[addressXY];
            Real vTermXZ = vPrefac * gridPtrIn[addressXZ];
            Real vTermYZ = vPrefac * gridPtrIn[addressYZ];
            energy += eTerm;
            Vxx += vTerm * mVecX * mVecX;
            Vxy -= vTermXY * mVecX * mVecY;
            Vyy += vTerm * mVecY * mVecY;
            Vxz -= vTermXZ * mVecX * mVecZ;
            Vyz -= vTermYZ * mVecY * mVecZ;
            Vzz += vTerm * mVecZ * mVecZ;
        }

        energy /= 2;

        virial[0][0] -= Vxx - energy;
        virial[0][1] -= Vxy;
        virial[0][2] -= Vyy - energy;
        virial[0][3] -= Vxz;
        virial[0][4] -= Vyz;
        virial[0][5] -= Vzz - energy;

        return energy;
    }

    /*!
* \brief checkMinimumImageCutoff ensure that the box dimensions satisfy the condition * sphericalCutoff < MIN(W_A, W_B, W_C)/2 * * where * * W_A = |A.(B x C)| / |B x C| * W_B = |B.(C x A)| / |C x A| * W_C = |C.(A x B)| / |A x B| * * \param sphericalCutoff the spherical nonbonded cutoff in Angstrom */ void checkMinimumImageCutoff(int sphericalCutoff) { Real V = cellVolume(); Real ABx = boxVecs_(0, 1) * boxVecs_(1, 2) - boxVecs_(0, 2) * boxVecs_(1, 1); Real ABy = boxVecs_(0, 0) * boxVecs_(1, 2) - boxVecs_(0, 2) * boxVecs_(1, 0); Real ABz = boxVecs_(0, 0) * boxVecs_(1, 1) - boxVecs_(0, 1) * boxVecs_(1, 0); Real ACx = boxVecs_(0, 1) * boxVecs_(2, 2) - boxVecs_(0, 2) * boxVecs_(2, 1); Real ACy = boxVecs_(0, 0) * boxVecs_(2, 2) - boxVecs_(0, 2) * boxVecs_(2, 0); Real ACz = boxVecs_(0, 0) * boxVecs_(2, 1) - boxVecs_(0, 1) * boxVecs_(2, 0); Real BCx = boxVecs_(1, 1) * boxVecs_(2, 2) - boxVecs_(1, 2) * boxVecs_(2, 1); Real BCy = boxVecs_(1, 0) * boxVecs_(2, 2) - boxVecs_(1, 2) * boxVecs_(2, 0); Real BCz = boxVecs_(1, 0) * boxVecs_(2, 1) - boxVecs_(1, 1) * boxVecs_(2, 0); Real AxBnorm = std::sqrt(ABx * ABx + ABy * ABy + ABz * ABz); Real AxCnorm = std::sqrt(ACx * ACx + ACy * ACy + ACz * ACz); Real BxCnorm = std::sqrt(BCx * BCx + BCy * BCy + BCz * BCz); Real minDim = 2 * sphericalCutoff; if (V / AxBnorm < minDim || V / AxCnorm < minDim || V / BxCnorm < minDim) throw std::runtime_error("The cutoff used must be less than half of the minimum of three box widths"); } /*! * \brief sanityChecks just makes sure that inputs have consistent dimensions, and that prerequisites are * initialized. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). * \param parameters the input parameters. * \param coordinates the input coordinates. 
* \param cartesianOffset an offset to the start of the angular momentum shell for the parameters, in cases where * only a single angular momentum shell is to be processed (rather than all shells up to a given angular momentum). */ void sanityChecks(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, int cartesianOffset = 0) { assertInitialized(); if (parameterAngMom < 0) throw std::runtime_error("Negative parameter angular momentum found where positive value was expected"); if (boxVecs_.isNearZero()) throw std::runtime_error( "Lattice vectors have not been set yet! Call setLatticeVectors(...) before runPME(...);"); if (coordinates.nRows() != parameters.nRows()) throw std::runtime_error( "Inconsistent number of coordinates and parameters; there should be nAtoms of each."); if (parameters.nCols() != (nCartesian(parameterAngMom) - cartesianOffset)) throw std::runtime_error( "Mismatch in the number of parameters provided and the parameter angular momentum"); } /*! * \brief cacheInfluenceFunctionImpl computes the influence function used in convolution, for later use. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param myNx the subset of the grid in the x direction to be handled by this node. * \param myNy the subset of the grid in the y direction to be handled by this node. * \param myNz the subset of the grid in the z direction to be handled by this node. * \param startX the starting reciprocal space sum term handled by this node in the X direction. * \param startY the starting reciprocal space sum term handled by this node in the Y direction. * \param startZ the starting reciprocal space sum term handled by this node in the Z direction. * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the * 1 / [4 pi epslion0] for Coulomb calculations). * \param gridPtr the Fourier space grid, with ordering YXZ. 
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param influenceFunction the vector that is resized and filled with the influence function values.
     * \param nThreads the number of OpenMP threads to use.
     */
    template <int rPower>
    static void cacheInfluenceFunctionImpl(int myNx, int myNy, int myNz, int startX, int startY, int startZ,
                                           Real scaleFactor, RealVec &influenceFunction, const RealMat &boxInv,
                                           Real volume, Real kappa, const Real *xMods, const Real *yMods,
                                           const Real *zMods, const int *xMVals, const int *yMVals,
                                           const int *zMVals, int nThreads) {
        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        size_t nxz = (size_t)myNx * myNz;
        size_t nyxz = myNy * nxz;
        influenceFunction.resize(nyxz);
        Real *gridPtr = influenceFunction.data();
        // The m=0 influence value is simply zeroed; it is handled separately elsewhere.
        if (nodeZero) gridPtr[0] = 0;

        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        const Real *boxPtr = boxInv[0];
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into its (ky, kx, kz) components.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            const Real mx = (Real)xMVals[kx];
            const Real my = (Real)yMVals[ky];
            const Real mz = (Real)zMVals[kz];
            // Cartesian components of the reciprocal lattice vector m.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            Real incompleteGammaTerm = incompleteGammaComputer<Real, 3 - rPower>::compute(bSquared);
            gridPtr[yxz] = volPrefac * incompleteGammaTerm * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
        }
    }

    /*!
     * \brief dirEImpl computes the kernel for the direct energy for a pair.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param rSquared the square of the internuclear distance
     * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
     * \return the energy kernel.
     */
    template <int rPower>
    inline static Real dirEImpl(Real rSquared, Real kappaSquared) {
        Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
        Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                         gammaComputer<Real, rPower>::value;
        return gammaTerm / denominator;
    }

    /*!
     * \brief dirEFImpl computes the kernels for the direct energy and force for a pair.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param rSquared the square of the internuclear distance
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return a tuple containing the energy and force kernels, respectively. */ template <int rPower> inline static std::tuple<Real, Real> dirEFImpl(Real rSquared, Real kappa, Real kappaSquared) { Real rInv = 1 / rSquared; Real kappaToRPower = kappa; for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa; Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; Real eKernel = gammaTerm / denominator; Real fKernel = -rPower * eKernel * rInv - 2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value; return std::make_tuple(eKernel, fKernel); } /*! * \brief adjEImpl computes the kernel for the adjusted energy for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return the energy kernel. */ template <int rPower> inline static Real adjEImpl(Real rSquared, Real kappaSquared) { Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; return (gammaTerm - 1) / denominator; } /*! * \brief adjEFImpl computes the kernels for the adjusted energy and force for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappa the attenuation parameter in units inverse of those used to specify coordinates. 
     * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
     * \return a tuple containing the energy and force kernels, respectively.
     */
    template <int rPower>
    inline static std::tuple<Real, Real> adjEFImpl(Real rSquared, Real kappa, Real kappaSquared) {
        // N.B. despite the name, rInv holds 1 / r^2 (the inverse *squared* distance).
        Real rInv = 1 / rSquared;
        // kappa^rPower, built up by repeated multiplication.
        Real kappaToRPower = kappa;
        for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa;
        Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
        Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                         gammaComputer<Real, rPower>::value;
        // Differs from the direct kernel (dirEFImpl) only by the -1 in the numerator.
        Real eKernel = (gammaTerm - 1) / denominator;
        Real fKernel = -rPower * eKernel * rInv -
                       2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value;
        return std::make_tuple(eKernel, fKernel);
    }

    /*!
     * \brief slfEImpl computes the coefficient to be applied to the sum of squared parameters for the self energy
     *        due to particles feeling their own potential.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
     *        quadrupoles, etc.).
     * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
     *        etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where
     *        nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     *        i.e. generated by the python loops
     *        \code{.py}
     *        for L in range(maxAM+1):
     *            for Lz in range(0,L+1):
     *                for Ly in range(0, L - Lz + 1):
     *                    Lx = L - Ly - Lz
     *        \endcode
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
     * (e.g.
the 1 / [4 pi epslion0] for Coulomb calculations). * \return the coefficient for the sum of squared parameters in the self energy. N.B. there is no self force * associated with this term. */ template <int rPower> static Real slfEImpl(int parameterAngMom, Real kappa, Real scaleFactor) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); return -scaleFactor * std::pow(kappa, rPower) / (rPower * gammaComputer<Real, rPower>::value); } /*! * \brief common_init sets up information that is common to serial and parallel runs. */ void setupCalculationMetadata(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA, int maxKB, int maxKC, Real scaleFactor, int nThreads, void *commPtrIn, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) { int numKSumTermsA = std::min(2 * maxKA + 1, dimA); int numKSumTermsB = std::min(2 * maxKB + 1, dimB); int numKSumTermsC = std::min(2 * maxKC + 1, dimC); AlgorithmType algorithm = numKSumTermsA < dimA && numKSumTermsB < dimB && numKSumTermsC < dimC ? 
AlgorithmType::CompressedPME : AlgorithmType::PME; kappaHasChanged_ = kappa != kappa_; numNodesHasChanged_ = numNodesA_ != numNodesA || numNodesB_ != numNodesB || numNodesC_ != numNodesC; rPowerHasChanged_ = rPower_ != rPower; gridDimensionHasChanged_ = gridDimensionA_ != dimA || gridDimensionB_ != dimB || gridDimensionC_ != dimC; reciprocalSumDimensionHasChanged_ = numKSumTermsA != numKSumTermsA_ || numKSumTermsB != numKSumTermsB_ || numKSumTermsC != numKSumTermsC_; algorithmHasChanged_ = algorithmType_ != algorithm; splineOrderHasChanged_ = splineOrder_ != splineOrder; scaleFactorHasChanged_ = scaleFactor_ != scaleFactor; if (kappaHasChanged_ || rPowerHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ || numNodesHasChanged_ || scaleFactorHasChanged_ || algorithmHasChanged_ || requestedNumberOfThreads_ != nThreads) { numNodesA_ = numNodesA; numNodesB_ = numNodesB; numNodesC_ = numNodesC; myNodeRankA_ = myNodeRankB_ = myNodeRankC_ = 0; #if HAVE_MPI == 1 if (commPtrIn) { MPI_Comm const &communicator = *((MPI_Comm *)(commPtrIn)); mpiCommunicator_ = std::unique_ptr<MPIWrapper<Real>>( new MPIWrapper<Real>(communicator, numNodesA, numNodesB, numNodesC)); switch (nodeOrder) { case (NodeOrder::ZYX): myNodeRankA_ = mpiCommunicator_->myRank_ % numNodesA; myNodeRankB_ = (mpiCommunicator_->myRank_ % (numNodesB * numNodesA)) / numNodesA; myNodeRankC_ = mpiCommunicator_->myRank_ / (numNodesB * numNodesA); mpiCommunicatorA_ = mpiCommunicator_->split(myNodeRankC_ * numNodesB + myNodeRankB_, myNodeRankA_); mpiCommunicatorB_ = mpiCommunicator_->split(myNodeRankC_ * numNodesA + myNodeRankA_, myNodeRankB_); mpiCommunicatorC_ = mpiCommunicator_->split(myNodeRankB_ * numNodesA + myNodeRankA_, myNodeRankC_); break; default: throw std::runtime_error("Unknown NodeOrder in helpme::setupCalculationMetadata."); } } #else // Have MPI if (numNodesA * numNodesB * numNodesC > 1) throw std::runtime_error( "a parallel calculation has been setup, but helpme was not compiled with MPI. 
Make sure you " "compile with -DHAVE_MPI=1 " "in the list of compiler definitions."); #endif // Have MPI rPower_ = rPower; algorithmType_ = algorithm; splineOrder_ = splineOrder; cacheLineSizeInReals_ = static_cast<Real>(sysconf(_SC_PAGESIZE) / sizeof(Real)); requestedNumberOfThreads_ = nThreads; #ifdef _OPENMP nThreads_ = nThreads ? nThreads : omp_get_max_threads(); #else nThreads_ = 1; #endif scaleFactor_ = scaleFactor; kappa_ = kappa; size_t scratchSize; int gridPaddingA = 0, gridPaddingB = 0, gridPaddingC = 0; if (algorithm == AlgorithmType::CompressedPME) { gridDimensionA_ = numNodesA * std::ceil(dimA / (float)numNodesA); gridDimensionB_ = numNodesB * std::ceil(dimB / (float)numNodesB); gridDimensionC_ = numNodesC * std::ceil(dimC / (float)numNodesC); gridPaddingA = (numNodesA > 1 ? splineOrder - 1 : 0); gridPaddingB = (numNodesB > 1 ? splineOrder - 1 : 0); gridPaddingC = (numNodesC > 1 ? splineOrder - 1 : 0); myGridDimensionA_ = gridDimensionA_ / numNodesA + gridPaddingA; myGridDimensionB_ = gridDimensionB_ / numNodesB + gridPaddingB; myGridDimensionC_ = gridDimensionC_ / numNodesC + gridPaddingC; myFirstGridPointA_ = myNodeRankA_ * (myGridDimensionA_ - gridPaddingA); myFirstGridPointB_ = myNodeRankB_ * (myGridDimensionB_ - gridPaddingB); myFirstGridPointC_ = myNodeRankC_ * (myGridDimensionC_ - gridPaddingC); myNumKSumTermsA_ = numNodesA == 1 ? numKSumTermsA : 2 * std::ceil((maxKA + 1.0) / numNodesA); myNumKSumTermsB_ = numNodesB == 1 ? numKSumTermsB : 2 * std::ceil((maxKB + 1.0) / numNodesB); myNumKSumTermsC_ = numNodesC == 1 ? 
numKSumTermsC : 2 * std::ceil((maxKC + 1.0) / numNodesC); numKSumTermsA_ = myNumKSumTermsA_ * numNodesA; numKSumTermsB_ = myNumKSumTermsB_ * numNodesB; numKSumTermsC_ = myNumKSumTermsC_ * numNodesC; firstKSumTermA_ = myNodeRankA_ * myNumKSumTermsA_; firstKSumTermB_ = myNodeRankB_ * myNumKSumTermsB_; firstKSumTermC_ = myNodeRankC_ * myNumKSumTermsC_; fftHelperA_ = std::move(FFTWWrapper<Real>()); fftHelperB_ = std::move(FFTWWrapper<Real>()); fftHelperC_ = std::move(FFTWWrapper<Real>()); compressionCoefficientsA_ = RealMat(numKSumTermsA_, myGridDimensionA_); compressionCoefficientsB_ = RealMat(numKSumTermsB_, myGridDimensionB_); compressionCoefficientsC_ = RealMat(numKSumTermsC_, myGridDimensionC_); scratchSize = (size_t)std::max(myGridDimensionA_, numKSumTermsA) * std::max(myGridDimensionB_, numKSumTermsB) * std::max(myGridDimensionC_, numKSumTermsC); } else { gridDimensionA_ = findGridSize(dimA, {numNodesA_}); gridDimensionB_ = findGridSize(dimB, {numNodesB_ * numNodesC_}); gridDimensionC_ = findGridSize(dimC, {numNodesA_ * numNodesC_, numNodesB_ * numNodesC_}); gridPaddingA = gridPaddingB = gridPaddingC = 0; myGridDimensionA_ = gridDimensionA_ / numNodesA_; myGridDimensionB_ = gridDimensionB_ / numNodesB_; myGridDimensionC_ = gridDimensionC_ / numNodesC_; complexGridDimensionA_ = gridDimensionA_ / 2 + 1; myComplexGridDimensionA_ = myGridDimensionA_ / 2 + 1; numKSumTermsA_ = gridDimensionA_; numKSumTermsB_ = gridDimensionB_; numKSumTermsC_ = gridDimensionC_; myNumKSumTermsA_ = myComplexGridDimensionA_; myNumKSumTermsB_ = myGridDimensionB_ / numNodesC_; myNumKSumTermsC_ = gridDimensionC_; myFirstGridPointA_ = myNodeRankA_ * myGridDimensionA_; myFirstGridPointB_ = myNodeRankB_ * myGridDimensionB_; myFirstGridPointC_ = myNodeRankC_ * myGridDimensionC_; firstKSumTermA_ = myNodeRankA_ * myComplexGridDimensionA_; firstKSumTermB_ = myNodeRankB_ * myGridDimensionB_ + myNodeRankC_ * myGridDimensionB_ / numNodesC_; firstKSumTermC_ = 0; fftHelperA_ = 
std::move(FFTWWrapper<Real>(gridDimensionA_)); fftHelperB_ = std::move(FFTWWrapper<Real>(gridDimensionB_)); fftHelperC_ = std::move(FFTWWrapper<Real>(gridDimensionC_)); compressionCoefficientsA_ = RealMat(); compressionCoefficientsB_ = RealMat(); compressionCoefficientsC_ = RealMat(); scratchSize = (size_t)myGridDimensionC_ * myComplexGridDimensionA_ * myGridDimensionB_; } // Grid iterators to correctly wrap the grid when using splines. gridIteratorA_ = makeGridIterator(gridDimensionA_, myFirstGridPointA_, myFirstGridPointA_ + myGridDimensionA_, gridPaddingA); gridIteratorB_ = makeGridIterator(gridDimensionB_, myFirstGridPointB_, myFirstGridPointB_ + myGridDimensionB_, gridPaddingB); gridIteratorC_ = makeGridIterator(gridDimensionC_, myFirstGridPointC_, myFirstGridPointC_ + myGridDimensionC_, gridPaddingC); // Divide C grid points among threads to avoid race conditions. threadedGridIteratorC_.clear(); for (int thread = 0; thread < nThreads_; ++thread) { GridIterator myIterator; for (int cGridPoint = 0; cGridPoint < gridDimensionC_; ++cGridPoint) { std::vector<std::pair<short, short>> splineIterator; for (const auto &fullIterator : gridIteratorC_[cGridPoint]) { if (fullIterator.first % nThreads_ == thread) { splineIterator.push_back(fullIterator); } } splineIterator.shrink_to_fit(); myIterator.push_back(splineIterator); } myIterator.shrink_to_fit(); threadedGridIteratorC_.push_back(myIterator); } threadedGridIteratorC_.shrink_to_fit(); // Assign a large default so that uninitialized values end up generating zeros later on mValsA_.resize(myNumKSumTermsA_, 99); mValsB_.resize(myNumKSumTermsB_, 99); mValsC_.resize(myNumKSumTermsC_, 99); if (algorithm == AlgorithmType::CompressedPME) { // For compressed PME we order the m values as 0, 1, -1, 2, -2, ..., Kmax, -Kmax // because we need to guarantee that +/- m pairs live on the same node for the virial. mValsA_[0] = 0; int startA = myNodeRankA_ ? 
0 : 1; for (int k = startA; k < (myNumKSumTermsA_ + (numNodesA_ == 1)) / 2; ++k) { int m = myNodeRankA_ * myNumKSumTermsA_ / 2 + k; mValsA_[startA + 2 * (k - startA)] = m; mValsA_[startA + 2 * (k - startA) + 1] = -m; } mValsB_[0] = 0; int startB = myNodeRankB_ ? 0 : 1; for (int k = startB; k < (myNumKSumTermsB_ + (numNodesB_ == 1)) / 2; ++k) { int m = myNodeRankB_ * myNumKSumTermsB_ / 2 + k; mValsB_[startB + 2 * (k - startB)] = m; mValsB_[startB + 2 * (k - startB) + 1] = -m; } mValsC_[0] = 0; int startC = myNodeRankC_ ? 0 : 1; for (int k = startC; k < (myNumKSumTermsC_ + (numNodesC_ == 1)) / 2; ++k) { int m = myNodeRankC_ * myNumKSumTermsC_ / 2 + k; mValsC_[startC + 2 * (k - startC)] = m; mValsC_[startC + 2 * (k - startC) + 1] = -m; } std::fill(compressionCoefficientsA_[0], compressionCoefficientsA_[1], 1); for (int node = 0; node < numNodesA_; ++node) { int offset = node ? 0 : 1; for (int m = offset; m < (myNumKSumTermsA_ + (numNodesA_ == 1)) / 2; ++m) { int fullM = m + node * myNumKSumTermsA_ / 2; Real *rowPtr = compressionCoefficientsA_[offset + 2 * (fullM - offset)]; for (int n = 0; n < myGridDimensionA_; ++n) { Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointA_) / gridDimensionA_; rowPtr[n] = std::sqrt(2) * std::cos(exponent); rowPtr[n + myGridDimensionA_] = std::sqrt(2) * std::sin(exponent); } } } std::fill(compressionCoefficientsB_[0], compressionCoefficientsB_[1], 1); for (int node = 0; node < numNodesB_; ++node) { int offset = node ? 
0 : 1; for (int m = offset; m < (myNumKSumTermsB_ + (numNodesB_ == 1)) / 2; ++m) { int fullM = m + node * myNumKSumTermsB_ / 2; Real *rowPtr = compressionCoefficientsB_[offset + 2 * (fullM - offset)]; for (int n = 0; n < myGridDimensionB_; ++n) { Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointB_) / gridDimensionB_; rowPtr[n] = std::sqrt(2) * std::cos(exponent); rowPtr[n + myGridDimensionB_] = std::sqrt(2) * std::sin(exponent); } } } std::fill(compressionCoefficientsC_[0], compressionCoefficientsC_[1], 1); for (int node = 0; node < numNodesC_; ++node) { int offset = node ? 0 : 1; for (int m = offset; m < (myNumKSumTermsC_ + (numNodesC_ == 1)) / 2; ++m) { int fullM = m + node * myNumKSumTermsC_ / 2; Real *rowPtr = compressionCoefficientsC_[offset + 2 * (fullM - offset)]; for (int n = 0; n < myGridDimensionC_; ++n) { Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointC_) / gridDimensionC_; rowPtr[n] = std::sqrt(2) * std::cos(exponent); rowPtr[n + myGridDimensionC_] = std::sqrt(2) * std::sin(exponent); } } } // Fourier space spline norms. Spline spline = Spline(0, 0, splineOrder_, 0); splineModA_ = spline.invSplineModuli(gridDimensionA_, mValsA_); splineModB_ = spline.invSplineModuli(gridDimensionB_, mValsB_); splineModC_ = spline.invSplineModuli(gridDimensionC_, mValsC_); } else { // For conventional PME we order the m values as 0, 1, 2, 3, .., Kmax, -Kmax, -Kmax+1, .., -2, -1 // because this is consistent with the ordering of m values that emerge from the FFT. for (int ka = 0; ka < myNumKSumTermsA_; ++ka) { mValsA_[ka] = firstKSumTermA_ + (ka + firstKSumTermA_ >= (gridDimensionA_ + 1) / 2 ? ka - gridDimensionA_ : ka); } for (int kb = 0; kb < myNumKSumTermsB_; ++kb) { mValsB_[kb] = firstKSumTermB_ + (kb + firstKSumTermB_ >= (gridDimensionB_ + 1) / 2 ? kb - gridDimensionB_ : kb); } for (int kc = 0; kc < myNumKSumTermsC_; ++kc) { mValsC_[kc] = firstKSumTermC_ + (kc + firstKSumTermC_ >= (gridDimensionC_ + 1) / 2 ? 
kc - gridDimensionC_ : kc); } // Fourier space spline norms. Spline spline = Spline(0, 0, splineOrder_, 0); auto fullSplineModA = spline.invSplineModuli(gridDimensionA_); auto fullSplineModB = spline.invSplineModuli(gridDimensionB_); auto fullSplineModC = spline.invSplineModuli(gridDimensionC_); scaledRecVecs_ = recVecs_.clone(); scaledRecVecs_.row(0) *= gridDimensionA_; scaledRecVecs_.row(1) *= gridDimensionB_; scaledRecVecs_.row(2) *= gridDimensionC_; splineModA_.resize(myNumKSumTermsA_); splineModB_.resize(myNumKSumTermsB_); splineModC_.resize(myNumKSumTermsC_); std::copy(&fullSplineModA[firstKSumTermA_], &fullSplineModA[firstKSumTermA_ + myNumKSumTermsA_], splineModA_.begin()); std::copy(&fullSplineModB[firstKSumTermB_], &fullSplineModB[firstKSumTermB_ + myNumKSumTermsB_], splineModB_.begin()); std::copy(&fullSplineModC[firstKSumTermC_], &fullSplineModC[firstKSumTermC_ + myNumKSumTermsC_], splineModC_.begin()); } // Set up function pointers by instantiating the appropriate evaluation functions. We could add many more // entries by default here, but don't right now to avoid code bloat. To add an extra rPower kernel is a // trivial cut and paste exercise; just add a new line with the desired 1/R power as the macro's argument. switch (rPower) { ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(1); ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(6); default: std::string msg("Bad rPower requested. To fix this, add the appropriate entry in"); msg += __FILE__; msg += ", line number "; msg += std::to_string(__LINE__ - 5); throw std::runtime_error(msg.c_str()); break; } subsetOfCAlongA_ = myGridDimensionC_ / numNodesA_; subsetOfCAlongB_ = myGridDimensionC_ / numNodesB_; subsetOfBAlongC_ = myGridDimensionB_ / numNodesC_; workSpace1_ = helpme::vector<Complex>(scratchSize); workSpace2_ = helpme::vector<Complex>(scratchSize); #if HAVE_MKL mkl_set_num_threads(nThreads_); #endif } } public: /*! * \brief Spread the parameters onto the charge grid. 
Generally this shouldn't be called;
     * use the various computeE() methods instead. This is the more efficient version that filters
     * the atom list and uses pre-computed splines. Therefore, the splineCache_
     * member must have been updated via a call to filterAtomsAndBuildSplineCache() first.
     * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
     * quadrupoles, etc.).
     * \param parameters the list of parameters associated with each atom (charges, C6
     * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
     * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     * i.e. generated by the python loops
     * \code{.py}
     * for L in range(maxAM+1):
     *     for Lz in range(0,L+1):
     *         for Ly in range(0, L - Lz + 1):
     *             Lx = L - Ly - Lz
     * \endcode
     * \return realGrid the array of discretized parameters (stored in CBA order).
     */
    Real *spreadParameters(int parameterAngMom, const RealMat &parameters) {
        // The charge grid is built in workSpace1_; the caller receives a pointer into that buffer.
        Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data());
        updateAngMomIterator(parameterAngMom);
        // We need to figure out whether the incoming parameters need to be transformed to scaled fractional
        // coordinates or not, which is only needed for angular momentum higher than zero.
        RealMat tempParams;
        if (parameterAngMom) {
            tempParams = cartesianTransform(parameterAngMom, false, scaledRecVecs_.transpose(), parameters);
        }
        const auto &fractionalParameters = parameterAngMom ? tempParams : parameters;
        int nComponents = nCartesian(parameterAngMom);
        size_t numBA = (size_t)myGridDimensionB_ * myGridDimensionA_;
#pragma omp parallel num_threads(nThreads_)
        {
#ifdef _OPENMP
            int threadID = omp_get_thread_num();
#else
            int threadID = 0;
#endif
            // Zero this thread's C rows of the grid; rows are strided by thread index so no two
            // threads ever touch the same row.
            for (size_t row = threadID; row < myGridDimensionC_; row += nThreads_) {
                std::fill(&realGrid[row * numBA], &realGrid[(row + 1) * numBA], Real(0));
            }
            // Spread only the splines pre-assigned to this thread (see filterAtomsAndBuildSplineCache);
            // the per-thread partitioning by C grid row is what makes the unsynchronized writes safe.
            for (const auto &spline : splinesPerThread_[threadID]) {
                const auto &cacheEntry = splineCache_[spline];
                const int &atom = cacheEntry.absoluteAtomNumber;
                const auto &splineA = cacheEntry.aSpline;
                const auto &splineB = cacheEntry.bSpline;
                const auto &splineC = cacheEntry.cSpline;
                spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, fractionalParameters,
                                     threadID);
            }
        }
        return realGrid;
    }

    /*!
     * \brief filterAtomsAndBuildSplineCache builds a list of BSplines for only the atoms to be handled by this node.
     * \param splineDerivativeLevel the derivative level (parameter angular momentum + energy derivative level) of the
     *        BSplines.
     * \param coords the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
     */
    void filterAtomsAndBuildSplineCache(int splineDerivativeLevel, const RealMat &coords) {
        assertInitialized();
        // Small shift subtracted from fractional coordinates so atoms sitting exactly on a grid
        // boundary are classified consistently.
        constexpr float EPS = 1e-6;
        size_t nAtoms = coords.nRows();
        numAtomsPerThread_.resize(nThreads_);
        splinesPerThread_.resize(nThreads_);
        gridAtomList_.resize(gridDimensionC_);
        // Classify atoms to their worker threads first, then construct splines for each thread
#pragma omp parallel num_threads(nThreads_)
        {
#ifdef _OPENMP
            int threadID = omp_get_thread_num();
#else
            int threadID = 0;
#endif
            for (size_t row = threadID; row < gridDimensionC_; row += nThreads_) {
                gridAtomList_[row].clear();
            }
            auto &mySplineList = splinesPerThread_[threadID];
            const auto &gridIteratorC = threadedGridIteratorC_[threadID];
            mySplineList.clear();
            size_t myNumAtoms = 0;
            // NOTE(review): atom is int while nAtoms is size_t — signed/unsigned comparison; fine for
            // atom counts < INT_MAX but worth confirming upstream bounds.
            for (int atom = 0; atom < nAtoms; ++atom) {
                const Real *atomCoords = coords[atom];
                // Fractional C coordinate, wrapped into [0,1).
                Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) +
                              atomCoords[2] * recVecs_(2, 2) - EPS;
                cCoord -= floor(cCoord);
                short cStartingGridPoint = gridDimensionC_ * cCoord;
                // Ownership of an atom is decided by its starting C grid row, so later grid writes never race.
                size_t thisAtomsThread = cStartingGridPoint % nThreads_;
                const auto &cGridIterator = gridIteratorC_[cStartingGridPoint];
                if (cGridIterator.size() && thisAtomsThread == threadID) {
                    Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) +
                                  atomCoords[2] * recVecs_(2, 0) - EPS;
                    Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) +
                                  atomCoords[2] * recVecs_(2, 1) - EPS;
                    // Make sure the fractional coordinates fall in the range 0 <= s < 1
                    aCoord -= floor(aCoord);
                    bCoord -= floor(bCoord);
                    short aStartingGridPoint = gridDimensionA_ * aCoord;
                    short bStartingGridPoint = gridDimensionB_ * bCoord;
                    const auto &aGridIterator = gridIteratorA_[aStartingGridPoint];
                    const auto &bGridIterator = gridIteratorB_[bStartingGridPoint];
                    // Linearized starting grid point, used as the sort key so atoms are processed in
                    // memory order when spreading.
                    uint32_t startingGridPoint = cStartingGridPoint * gridDimensionB_ * gridDimensionA_ +
                                                 bStartingGridPoint * gridDimensionA_ + aStartingGridPoint;
                    if (aGridIterator.size() && bGridIterator.size()) {
                        gridAtomList_[cStartingGridPoint].emplace(startingGridPoint, atom);
                        ++myNumAtoms;
                    }
                }
            }
            numAtomsPerThread_[threadID] = myNumAtoms;
        }
        // We could intervene here and do some load balancing by inspecting the list. Currently
        // the lazy approach of just assuming that the atoms are evenly distributed along c is used.
        size_t numCacheEntries = std::accumulate(numAtomsPerThread_.begin(), numAtomsPerThread_.end(), 0);
        // Now we know how many atoms we loop over the dense list, redefining nAtoms accordingly.
        // The first stage above is to get the number of atoms, so we can avoid calling push_back
        // and thus avoid the many memory allocations. If the cache is too small, grow it by a
        // certain scale factor to try and minimize allocations in a not-too-wasteful manner.
        if (splineCache_.size() < numCacheEntries) {
            size_t newSize = static_cast<size_t>(1.2 * numCacheEntries);
            // NOTE(review): atom is int compared against size_t newSize — OK for realistic cache sizes,
            // but signed/unsigned mismatch.
            for (int atom = splineCache_.size(); atom < newSize; ++atom)
                splineCache_.emplace_back(splineOrder_, splineDerivativeLevel);
        }
        // Exclusive prefix sum of per-thread atom counts gives each thread its first slot in the
        // dense spline cache, so threads can fill it without synchronization.
        std::vector<size_t> threadOffset(nThreads_, 0);
        for (int thread = 1; thread < nThreads_; ++thread) {
            threadOffset[thread] = threadOffset[thread - 1] + numAtomsPerThread_[thread - 1];
        }
#pragma omp parallel num_threads(nThreads_)
        {
#ifdef _OPENMP
            int threadID = omp_get_thread_num();
#else
            int threadID = 0;
#endif
            size_t entry = threadOffset[threadID];
            // Each thread walks exactly the C rows it owns (same striding as phase 1), so `entry`
            // advances over a disjoint slice of splineCache_.
            for (size_t cRow = threadID; cRow < gridDimensionC_; cRow += nThreads_) {
                for (const auto &gridPointAndAtom : gridAtomList_[cRow]) {
                    size_t atom = gridPointAndAtom.second;
                    const Real *atomCoords = coords[atom];
                    Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) +
                                  atomCoords[2] * recVecs_(2, 0) - EPS;
                    Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) +
                                  atomCoords[2] * recVecs_(2, 1) - EPS;
                    Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) +
                                  atomCoords[2] * recVecs_(2, 2) - EPS;
                    // Make sure the fractional coordinates fall in the range 0 <= s < 1
                    aCoord -= floor(aCoord);
                    bCoord -= floor(bCoord);
                    cCoord -= floor(cCoord);
                    short aStartingGridPoint = gridDimensionA_ * aCoord;
                    short bStartingGridPoint = gridDimensionB_ * bCoord;
                    short cStartingGridPoint = gridDimensionC_ * cCoord;
                    auto &atomSplines = splineCache_[entry++];
                    atomSplines.absoluteAtomNumber = atom;
                    // The second argument is the fractional offset within the starting grid cell.
                    atomSplines.aSpline.update(aStartingGridPoint, gridDimensionA_ * aCoord - aStartingGridPoint,
                                               splineOrder_, splineDerivativeLevel);
                    atomSplines.bSpline.update(bStartingGridPoint, gridDimensionB_ * bCoord - bStartingGridPoint,
                                               splineOrder_, splineDerivativeLevel);
                    atomSplines.cSpline.update(cStartingGridPoint, gridDimensionC_ * cCoord - cStartingGridPoint,
                                               splineOrder_, splineDerivativeLevel);
                }
            }
        }
        // Finally, find all of the splines that this thread will need to handle
#pragma omp parallel num_threads(nThreads_)
        {
#ifdef _OPENMP
            int threadID = omp_get_thread_num();
#else
            int threadID = 0;
#endif
            auto &mySplineList = splinesPerThread_[threadID];
            mySplineList.clear();
            const auto &gridIteratorC = threadedGridIteratorC_[threadID];
            size_t count = 0;
            // A cache entry belongs to this thread iff its starting C grid point maps to a non-empty
            // iterator in this thread's filtered C iterator.
            for (size_t atom = 0; atom < numCacheEntries; ++atom) {
                if (gridIteratorC[splineCache_[atom].cSpline.startingGridPoint()].size()) {
                    mySplineList.emplace_back(count);
                }
                ++count;
            }
        }
    }

    /*!
     * \brief cellVolume Compute the volume of the unit cell.
     * \return volume in units consistent with those used to define the lattice vectors.
     */
    Real cellVolume() {
        // Determinant of the 3x3 box matrix, expanded along the first row.
        return boxVecs_(0, 0) * boxVecs_(1, 1) * boxVecs_(2, 2) - boxVecs_(0, 0) * boxVecs_(1, 2) * boxVecs_(2, 1) +
               boxVecs_(0, 1) * boxVecs_(1, 2) * boxVecs_(2, 0) - boxVecs_(0, 1) * boxVecs_(1, 0) * boxVecs_(2, 2) +
               boxVecs_(0, 2) * boxVecs_(1, 0) * boxVecs_(2, 1) - boxVecs_(0, 2) * boxVecs_(1, 1) * boxVecs_(2, 0);
    }

    /*!
     * \brief minimumImageDeltaR Computes deltaR = positionJ - positionI, applying the minimum image convention to the
     *        result
     * \param positionI slice iterator over the first position's {x, y, z} components
     * \param positionJ slice iterator over the second position's {x, y, z} components
     * \return minimum image deltaR
     */
    std::array<Real, 3> minimumImageDeltaR(const typename helpme::Matrix<Real>::sliceIterator &positionI,
                                           const typename helpme::Matrix<Real>::sliceIterator &positionJ) {
        // This implementation could be specialized for orthorhombic unit cells, but we stick with a general
        // implementation for now. The difference in real (R) space
        Real dxR = positionJ[0] - positionI[0];
        Real dyR = positionJ[1] - positionI[1];
        Real dzR = positionJ[2] - positionI[2];
        // Convert to fractional coordinate (S) space
        Real dxS = recVecs_[0][0] * dxR + recVecs_[0][1] * dyR + recVecs_[0][2] * dzR;
        Real dyS = recVecs_[1][0] * dxR + recVecs_[1][1] * dyR + recVecs_[1][2] * dzR;
        Real dzS = recVecs_[2][0] * dxR + recVecs_[2][1] * dyR + recVecs_[2][2] * dzR;
        // Apply translations in fractional coordinates to find the shift vectors
        // (rounding to the nearest integer image).
        Real sxS = std::floor(dxS + 0.5f);
        Real syS = std::floor(dyS + 0.5f);
        Real szS = std::floor(dzS + 0.5f);
        // Convert fractional coordinate shifts to real space
        Real sxR = boxVecs_[0][0] * sxS + boxVecs_[0][1] * syS + boxVecs_[0][2] * szS;
        Real syR = boxVecs_[1][0] * sxS + boxVecs_[1][1] * syS + boxVecs_[1][2] * szS;
        Real szR = boxVecs_[2][0] * sxS + boxVecs_[2][1] * syS + boxVecs_[2][2] * szS;
        // Shift the difference vector to find the minimum image
        return {dxR - sxR, dyR - syR, dzR - szR};
    }

    /*!
     * \brief Sets the unit cell lattice vectors, with units consistent with those used to specify coordinates.
     * \param A the A lattice parameter in units consistent with the coordinates.
     * \param B the B lattice parameter in units consistent with the coordinates.
     * \param C the C lattice parameter in units consistent with the coordinates.
     * \param alpha the alpha lattice parameter in degrees.
     * \param beta the beta lattice parameter in degrees.
     * \param gamma the gamma lattice parameter in degrees.
     * \param latticeType how to arrange the lattice vectors. Options are
     *        ShapeMatrix: enforce a symmetric representation of the lattice vectors [c.f. S. Nosé and M. L. Klein,
     *                     Mol. Phys. 50 1055 (1983)] particularly appendix C.
     *        XAligned: make the A vector coincide with the X axis, the B vector fall in the XY plane, and the C vector
     *                  take the appropriate alignment to completely define the system.
     */
    void setLatticeVectors(Real A, Real B, Real C, Real alpha, Real beta, Real gamma, LatticeType latticeType) {
        // Only rebuild the lattice when something actually changed; unitCellHasChanged_ records the outcome.
        if (A != cellA_ || B != cellB_ || C != cellC_ || alpha != cellAlpha_ || beta != cellBeta_ ||
            gamma != cellGamma_ || latticeType != latticeType_) {
            if (latticeType == LatticeType::ShapeMatrix) {
                // Build H^T H from the cell parameters, then take its matrix square root via
                // diagonalization to obtain a symmetric box matrix.
                RealMat HtH(3, 3);
                HtH(0, 0) = A * A;
                HtH(1, 1) = B * B;
                HtH(2, 2) = C * C;
                const float TOL = 1e-4f;
                // Check for angles very close to 90, to avoid noise from the eigensolver later on.
                HtH(0, 1) = HtH(1, 0) = std::abs(gamma - 90) < TOL ? 0 : A * B * std::cos(HELPME_PI * gamma / 180);
                HtH(0, 2) = HtH(2, 0) = std::abs(beta - 90) < TOL ? 0 : A * C * std::cos(HELPME_PI * beta / 180);
                HtH(1, 2) = HtH(2, 1) = std::abs(alpha - 90) < TOL ? 0 : B * C * std::cos(HELPME_PI * alpha / 180);
                auto eigenTuple = HtH.diagonalize();
                RealMat evalsReal = std::get<0>(eigenTuple);
                RealMat evecs = std::get<1>(eigenTuple);
                for (int i = 0; i < 3; ++i) evalsReal(i, 0) = sqrt(evalsReal(i, 0));
                // boxVecs = V sqrt(D) V^T, the symmetric square root of HtH.
                boxVecs_.setZero();
                for (int i = 0; i < 3; ++i) {
                    for (int j = 0; j < 3; ++j) {
                        for (int k = 0; k < 3; ++k) {
                            boxVecs_(i, j) += evecs(i, k) * evecs(j, k) * evalsReal(k, 0);
                        }
                    }
                }
                recVecs_ = boxVecs_.inverse();
            } else if (latticeType == LatticeType::XAligned) {
                // Standard crystallographic construction: A along x, B in the xy plane.
                boxVecs_(0, 0) = A;
                boxVecs_(0, 1) = 0;
                boxVecs_(0, 2) = 0;
                boxVecs_(1, 0) = B * std::cos(HELPME_PI / 180 * gamma);
                boxVecs_(1, 1) = B * std::sin(HELPME_PI / 180 * gamma);
                boxVecs_(1, 2) = 0;
                boxVecs_(2, 0) = C * std::cos(HELPME_PI / 180 * beta);
                boxVecs_(2, 1) =
                    (B * C * cos(HELPME_PI / 180 * alpha) - boxVecs_(2, 0) * boxVecs_(1, 0)) / boxVecs_(1, 1);
                boxVecs_(2, 2) = std::sqrt(C * C - boxVecs_(2, 0) * boxVecs_(2, 0) - boxVecs_(2, 1) * boxVecs_(2, 1));
            } else {
                throw std::runtime_error("Unknown lattice type in setLatticeVectors");
            }
            recVecs_ = boxVecs_.inverse();
            // Pre-scale the reciprocal vectors by the grid dimensions; used when converting to
            // scaled fractional coordinates.
            scaledRecVecs_ = recVecs_.clone();
            scaledRecVecs_.row(0) *= gridDimensionA_;
            scaledRecVecs_.row(1) *= gridDimensionB_;
            scaledRecVecs_.row(2) *= gridDimensionC_;
            cellA_ = A;
            cellB_ = B;
            cellC_ = C;
            cellAlpha_ = alpha;
            cellBeta_ = beta;
            cellGamma_ = gamma;
            latticeType_ = latticeType;
            unitCellHasChanged_ = true;
        } else {
            unitCellHasChanged_ = false;
        }
    }

    /*!
     * \brief Performs the forward 3D FFT of the discretized parameter grid using the compressed PME algorithm.
     * \param realGrid the array of discretized parameters (stored in CBA order,
     *        with A being the fast running index) to be transformed.
     * \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order.
     */
    Real *compressedForwardTransform(Real *realGrid) {
        Real *__restrict__ buffer1, *__restrict__ buffer2;
        // Ping-pong between the two workspaces; pick the pair so the input grid is not clobbered
        // before it has been read.
        if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
            buffer1 = reinterpret_cast<Real *>(workSpace2_.data());
            buffer2 = reinterpret_cast<Real *>(workSpace1_.data());
        } else {
            buffer1 = reinterpret_cast<Real *>(workSpace1_.data());
            buffer2 = reinterpret_cast<Real *>(workSpace2_.data());
        }
        // Transform A index
        contractABxCWithDxC<Real>(realGrid, compressionCoefficientsA_[0], myGridDimensionC_ * myGridDimensionB_,
                                  myGridDimensionA_, numKSumTermsA_, buffer1);
        // Sort CBA->CAB
        permuteABCtoACB(buffer1, myGridDimensionC_, myGridDimensionB_, numKSumTermsA_, buffer2, nThreads_);
        // Transform B index
        contractABxCWithDxC<Real>(buffer2, compressionCoefficientsB_[0], myGridDimensionC_ * numKSumTermsA_,
                                  myGridDimensionB_, numKSumTermsB_, buffer1);
        // Sort CAB->BAC
        permuteABCtoCBA(buffer1, myGridDimensionC_, numKSumTermsA_, numKSumTermsB_, buffer2, nThreads_);
        // Transform C index
        contractABxCWithDxC<Real>(buffer2, compressionCoefficientsC_[0], numKSumTermsB_ * numKSumTermsA_,
                                  myGridDimensionC_, numKSumTermsC_, buffer1);
#if HAVE_MPI == 1
        int numNodes = numNodesA_ * numNodesB_ * numNodesC_;
        if (numNodes > 1) {
            // Resort the data to be grouped by node, for communication
            for (int node = 0; node < numNodes; ++node) {
                int nodeStartA = myNumKSumTermsA_ * (node % numNodesA_);
                int nodeStartB = myNumKSumTermsB_ * ((node % (numNodesB_ * numNodesA_)) / numNodesA_);
                int nodeStartC = myNumKSumTermsC_ * (node / (numNodesB_ * numNodesA_));
                Real *outPtr = buffer2 + node * myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_;
                for (int B = 0; B < myNumKSumTermsB_; ++B) {
                    const Real *inPtrB = buffer1 + (nodeStartB + B) * numKSumTermsA_ * numKSumTermsC_;
                    for (int A = 0; A < myNumKSumTermsA_; ++A) {
                        const Real *inPtrBA = inPtrB + (nodeStartA + A) * numKSumTermsC_;
                        const Real *inPtrBAC = inPtrBA + nodeStartC;
                        std::copy(inPtrBAC, inPtrBAC + myNumKSumTermsC_, outPtr);
                        outPtr += myNumKSumTermsC_;
                    }
                }
            }
            // Sum contributions from all nodes, leaving each node with only its own block of k-sum terms.
            mpiCommunicator_->reduceScatterBlock(buffer2, buffer1,
                                                 myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_);
        }
#endif
        return buffer1;
    }

    /*!
     * \brief Performs the forward 3D FFT of the discretized parameter grid.
     * \param realGrid the array of discretized parameters (stored in CBA order,
     *        with A being the fast running index) to be transformed.
     * \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order.
     */
    Complex *forwardTransform(Real *realGrid) {
        Real *__restrict__ realCBA;
        Complex *__restrict__ buffer1, *__restrict__ buffer2;
        // Choose scratch buffers so the input grid is preserved until consumed.
        if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
            realCBA = reinterpret_cast<Real *>(workSpace2_.data());
            buffer1 = workSpace2_.data();
            buffer2 = workSpace1_.data();
        } else {
            realCBA = reinterpret_cast<Real *>(workSpace1_.data());
            buffer1 = workSpace1_.data();
            buffer2 = workSpace2_.data();
        }
#if HAVE_MPI == 1
        if (numNodesA_ > 1) {
            // Communicate A along columns
            mpiCommunicatorA_->allToAll(realGrid, realCBA, subsetOfCAlongA_ * myGridDimensionA_ * myGridDimensionB_);
            // Resort the data to end up with realGrid holding a full row of A data, for B pencil and C subset.
            for (int c = 0; c < subsetOfCAlongA_; ++c) {
                Real *outC = realGrid + c * myGridDimensionB_ * gridDimensionA_;
                for (int b = 0; b < myGridDimensionB_; ++b) {
                    // Stitch together the A chunks received from each node into a contiguous A row.
                    for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                        Real *inPtr = realCBA +
                                      (chunk * subsetOfCAlongA_ + c) * myGridDimensionB_ * myGridDimensionA_ +
                                      b * myGridDimensionA_;
                        std::copy(inPtr, inPtr + myGridDimensionA_,
                                  outC + b * gridDimensionA_ + chunk * myGridDimensionA_);
                    }
                }
            }
        }
#endif
        // Each parallel node allocates buffers of length dimA/(2 numNodesA)+1 for A, leading to a total of
        //     dimA/2 + numNodesA = complexDimA+numNodesA-1 if dimA is even
        // and
        //     numNodesA (dimA-1)/2 + numNodesA = complexDimA + numNodesA/2-1 if dimA is odd
        // We just allocate the larger size here, remembering that the final padding values on the last node
        // will all be allocated to zero and will not contribute to the final answer.
        const size_t scratchRowDim = complexGridDimensionA_ + numNodesA_ - 1;
        helpme::vector<Complex> buffer(nThreads_ * scratchRowDim);
        // A transform, with instant sort to CAB ordering for each local block
#pragma omp parallel num_threads(nThreads_)
        {
#ifdef _OPENMP
            int threadID = omp_get_thread_num();
#else
            int threadID = 0;
#endif
            // Per-thread scratch row for the real-to-complex transform output.
            auto scratch = &buffer[threadID * scratchRowDim];
#pragma omp for
            for (int c = 0; c < subsetOfCAlongA_; ++c) {
                for (int b = 0; b < myGridDimensionB_; ++b) {
                    Real *gridPtr = realGrid + c * myGridDimensionB_ * gridDimensionA_ + b * gridDimensionA_;
                    fftHelperA_.transform(gridPtr, scratch);
                    for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                        for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                            buffer1[(chunk * subsetOfCAlongA_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                                    a * myGridDimensionB_ + b] = scratch[chunk * myComplexGridDimensionA_ + a];
                        }
                    }
                }
            }
        }
#if HAVE_MPI == 1
        // Communicate A back to blocks
        if (numNodesA_ > 1) {
            mpiCommunicatorA_->allToAll(buffer1, buffer2,
                                        subsetOfCAlongA_ * myComplexGridDimensionA_ * myGridDimensionB_);
            std::swap(buffer1, buffer2);
        }
        // Communicate B along rows
        if (numNodesB_ > 1) {
            mpiCommunicatorB_->allToAll(buffer1, buffer2,
                                        subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
            // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset.
            for (int c = 0; c < subsetOfCAlongB_; ++c) {
                Complex *cPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
                for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                    for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                        Complex *inPtr =
                            buffer2 +
                            (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                            a * myGridDimensionB_;
                        std::copy(inPtr, inPtr + myGridDimensionB_,
                                  cPtr + a * gridDimensionB_ + chunk * myGridDimensionB_);
                    }
                }
            }
        }
#endif
        // B transform
        size_t numCA = (size_t)subsetOfCAlongB_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
        for (size_t ca = 0; ca < numCA; ++ca) {
            fftHelperB_.transform(buffer1 + ca * gridDimensionB_, FFTW_FORWARD);
        }
#if HAVE_MPI == 1
        if (numNodesB_ > 1) {
            // Undo the full-B-row layout, regrouping by destination node.
            for (int c = 0; c < subsetOfCAlongB_; ++c) {
                Complex *zPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
                for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                    for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                        Complex *inPtr = zPtr + a * gridDimensionB_ + chunk * myGridDimensionB_;
                        Complex *outPtr =
                            buffer2 +
                            (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                            a * myGridDimensionB_;
                        std::copy(inPtr, inPtr + myGridDimensionB_, outPtr);
                    }
                }
            }
            // Communicate B back to blocks
            mpiCommunicatorB_->allToAll(buffer2, buffer1,
                                        subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
        }
#endif
        // sort local blocks from CAB to BAC order
        permuteABCtoCBA(buffer1, myGridDimensionC_, myComplexGridDimensionA_, myGridDimensionB_, buffer2, nThreads_);
#if HAVE_MPI == 1
        if (numNodesC_ > 1) {
            // Communicate C along columns
            mpiCommunicatorC_->allToAll(buffer2, buffer1,
                                        subsetOfBAlongC_ * myComplexGridDimensionA_ * myGridDimensionC_);
            for (int b = 0; b <
                 subsetOfBAlongC_; ++b) {
                // Stitch together the C chunks received from each node into a contiguous C row.
                Complex *outPtrB = buffer2 + b * myComplexGridDimensionA_ * gridDimensionC_;
                for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                    Complex *outPtrBA = outPtrB + a * gridDimensionC_;
                    for (int chunk = 0; chunk < numNodesC_; ++chunk) {
                        Complex *inPtr =
                            buffer1 +
                            (chunk * subsetOfBAlongC_ + b) * myComplexGridDimensionA_ * myGridDimensionC_ +
                            a * myGridDimensionC_;
                        std::copy(inPtr, inPtr + myGridDimensionC_, outPtrBA + chunk * myGridDimensionC_);
                    }
                }
            }
        }
#endif
        // C transform
        size_t numBA = (size_t)subsetOfBAlongC_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
        for (size_t ba = 0; ba < numBA; ++ba) {
            fftHelperC_.transform(buffer2 + ba * gridDimensionC_, FFTW_FORWARD);
        }
        return buffer2;
    }

    /*!
     * \brief Performs the inverse 3D FFT.
     * \param convolvedGrid the complex array of discretized parameters convolved with the influence function
     *        (stored in BAC order, with C being the fast running index) to be transformed.
     * \return Pointer to the potential grid, which is stored in one of the buffers in CBA order.
     */
    Real *inverseTransform(Complex *convolvedGrid) {
        Complex *__restrict__ buffer1, *__restrict__ buffer2;
        // Setup scratch, taking care not to overwrite the convolved grid.
        if (convolvedGrid == workSpace1_.data()) {
            buffer1 = workSpace2_.data();
            buffer2 = workSpace1_.data();
        } else {
            buffer1 = workSpace1_.data();
            buffer2 = workSpace2_.data();
        }
        // C transform
        size_t numYX = (size_t)subsetOfBAlongC_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
        for (size_t yx = 0; yx < numYX; ++yx) {
            fftHelperC_.transform(convolvedGrid + yx * gridDimensionC_, FFTW_BACKWARD);
        }
#if HAVE_MPI == 1
        if (numNodesC_ > 1) {
            // Communicate C back to blocks
            for (int b = 0; b < subsetOfBAlongC_; ++b) {
                Complex *inPtrB = convolvedGrid + b * myComplexGridDimensionA_ * gridDimensionC_;
                for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                    Complex *inPtrBA = inPtrB + a * gridDimensionC_;
                    for (int chunk = 0; chunk < numNodesC_; ++chunk) {
                        Complex *inPtrBAC = inPtrBA + chunk * myGridDimensionC_;
                        Complex *outPtr =
                            buffer1 +
                            (chunk * subsetOfBAlongC_ + b) * myComplexGridDimensionA_ * myGridDimensionC_ +
                            a * myGridDimensionC_;
                        std::copy(inPtrBAC, inPtrBAC + myGridDimensionC_, outPtr);
                    }
                }
            }
            mpiCommunicatorC_->allToAll(buffer1, buffer2,
                                        subsetOfBAlongC_ * myComplexGridDimensionA_ * myGridDimensionC_);
        }
#endif
        // sort local blocks from BAC to CAB order
        // (note: buffer2 aliases convolvedGrid here when no C communication occurred)
        permuteABCtoCBA(buffer2, myGridDimensionB_, myComplexGridDimensionA_, myGridDimensionC_, buffer1, nThreads_);
#if HAVE_MPI == 1
        // Communicate B along rows
        if (numNodesB_ > 1) {
            mpiCommunicatorB_->allToAll(buffer1, buffer2,
                                        subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
            // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset.
            for (int c = 0; c < subsetOfCAlongB_; ++c) {
                Complex *cPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
                for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                    for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                        Complex *inPtr =
                            buffer2 +
                            (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                            a * myGridDimensionB_;
                        std::copy(inPtr, inPtr + myGridDimensionB_,
                                  cPtr + a * gridDimensionB_ + chunk * myGridDimensionB_);
                    }
                }
            }
        }
#endif
        // B transform with instant sort of local blocks from CAB -> CBA order
        size_t numCA = (size_t)subsetOfCAlongB_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
        for (size_t ca = 0; ca < numCA; ++ca) {
            fftHelperB_.transform(buffer1 + ca * gridDimensionB_, FFTW_BACKWARD);
        }
#pragma omp parallel for num_threads(nThreads_)
        for (int c = 0; c < subsetOfCAlongB_; ++c) {
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                int cx = c * myComplexGridDimensionA_ * gridDimensionB_ + a * gridDimensionB_;
                for (int b = 0; b < myGridDimensionB_; ++b) {
                    for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                        int cb = (chunk * subsetOfCAlongB_ + c) * myGridDimensionB_ * myComplexGridDimensionA_ +
                                 b * myComplexGridDimensionA_;
                        buffer2[cb + a] = buffer1[cx + chunk * myGridDimensionB_ + b];
                    }
                }
            }
        }
#if HAVE_MPI == 1
        // Communicate B back to blocks
        if (numNodesB_ > 1) {
            mpiCommunicatorB_->allToAll(buffer2, buffer1,
                                        subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
        } else {
            std::swap(buffer1, buffer2);
        }
        // Communicate A along rows
        if (numNodesA_ > 1) {
            mpiCommunicatorA_->allToAll(buffer1, buffer2,
                                        subsetOfCAlongA_ * myComplexGridDimensionA_ * myGridDimensionB_);
            // Resort the data to end up with the buffer holding a full row of A data, for B pencil and C subset.
            for (int c = 0; c < subsetOfCAlongA_; ++c) {
                Complex *cPtr = buffer1 + c * myGridDimensionB_ * complexGridDimensionA_;
                for (int b = 0; b < myGridDimensionB_; ++b) {
                    for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                        Complex *inPtr =
                            buffer2 +
                            (chunk * subsetOfCAlongA_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                            b * myComplexGridDimensionA_;
                        std::copy(inPtr, inPtr + myComplexGridDimensionA_,
                                  cPtr + b * complexGridDimensionA_ + chunk * myComplexGridDimensionA_);
                    }
                }
            }
        }
#else
        std::swap(buffer1, buffer2);
#endif
        // A transform (complex-to-real, writing the real-space potential grid)
        Real *realGrid = reinterpret_cast<Real *>(buffer2);
#pragma omp parallel for num_threads(nThreads_)
        for (int cb = 0; cb < subsetOfCAlongA_ * myGridDimensionB_; ++cb) {
            fftHelperA_.transform(buffer1 + cb * complexGridDimensionA_, realGrid + cb * gridDimensionA_);
        }
#if HAVE_MPI == 1
        // Communicate A back to blocks
        if (numNodesA_ > 1) {
            Real *realGrid2 = reinterpret_cast<Real *>(buffer1);
            for (int c = 0; c < subsetOfCAlongA_; ++c) {
                Real *cPtr = realGrid + c * myGridDimensionB_ * gridDimensionA_;
                for (int b = 0; b < myGridDimensionB_; ++b) {
                    for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                        Real *outPtr = realGrid2 +
                                       (chunk * subsetOfCAlongA_ + c) * myGridDimensionB_ * myGridDimensionA_ +
                                       b * myGridDimensionA_;
                        Real *inPtr = cPtr + b * gridDimensionA_ + chunk * myGridDimensionA_;
                        std::copy(inPtr, inPtr + myGridDimensionA_, outPtr);
                    }
                }
            }
            mpiCommunicatorA_->allToAll(realGrid2, realGrid,
                                        subsetOfCAlongA_ * myGridDimensionB_ * myGridDimensionA_);
        }
#endif
        return realGrid;
    }

    /*!
     * \brief Performs the backward 3D FFT of the discretized parameter grid using the compressed PME algorithm.
     * \param reciprocalGrid the reciprocal space potential grid (stored in BAC order,
     *        with C being the fast running index) to be transformed.
     * \return Pointer to the transformed grid, which is stored in one of the buffers in CBA order.
         */
        Real *compressedInverseTransform(Real *reciprocalGrid) {
            // Ping-pong between the two scratch workspaces: pick buffer2 to alias the input grid
            // and buffer1 as the free destination for the first contraction.
            Real *__restrict__ buffer1, *__restrict__ buffer2;
            if (reciprocalGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
                buffer1 = reinterpret_cast<Real *>(workSpace2_.data());
                buffer2 = reinterpret_cast<Real *>(workSpace1_.data());
            } else {
                buffer1 = reinterpret_cast<Real *>(workSpace1_.data());
                buffer2 = reinterpret_cast<Real *>(workSpace2_.data());
            }

            // Make the reciprocal dimensions the fast running indices
            compressionCoefficientsA_.transposeInPlace();
            compressionCoefficientsB_.transposeInPlace();
            compressionCoefficientsC_.transposeInPlace();
#if HAVE_MPI == 1
            int numNodes = numNodesA_ * numNodesB_ * numNodesC_;
            if (numNodes > 1) {
                // Gather every node's local block of k-sum terms, then scatter each block into its
                // proper (B,A,C) position in the full reciprocal-space array.
                mpiCommunicator_->allGather(buffer2, buffer1, myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_);
                // Resort the data to be grouped by node, for communication
                for (int node = 0; node < numNodes; ++node) {
                    // Decompose the flat node index into its (A,B,C) grid coordinates to find the
                    // offset of this node's block within the assembled array.
                    int nodeStartA = myNumKSumTermsA_ * (node % numNodesA_);
                    int nodeStartB = myNumKSumTermsB_ * ((node % (numNodesB_ * numNodesA_)) / numNodesA_);
                    int nodeStartC = myNumKSumTermsC_ * (node / (numNodesB_ * numNodesA_));
                    Real *inPtr = buffer1 + node * myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_;
                    for (int B = 0; B < myNumKSumTermsB_; ++B) {
                        Real *outPtrB = buffer2 + (nodeStartB + B) * numKSumTermsA_ * numKSumTermsC_;
                        for (int A = 0; A < myNumKSumTermsA_; ++A) {
                            Real *outPtrBA = outPtrB + (nodeStartA + A) * numKSumTermsC_;
                            Real *outPtrBAC = outPtrBA + nodeStartC;
                            std::copy(inPtr, inPtr + myNumKSumTermsC_, outPtrBAC);
                            inPtr += myNumKSumTermsC_;
                        }
                    }
                }
            }
#endif
            // Back-transform one axis at a time: contract against the (transposed) compression
            // coefficients, permuting between contractions so the axis being transformed is fast-running.
            // Transform C index
            contractABxCWithDxC<Real>(buffer2, compressionCoefficientsC_[0], numKSumTermsB_ * numKSumTermsA_,
                                      numKSumTermsC_, myGridDimensionC_, buffer1);
            // Sort BAC->CAB
            permuteABCtoCBA(buffer1, numKSumTermsB_, numKSumTermsA_, myGridDimensionC_, buffer2, nThreads_);
            // Transform B index
            contractABxCWithDxC<Real>(buffer2, compressionCoefficientsB_[0], myGridDimensionC_ * numKSumTermsA_,
                                      numKSumTermsB_, myGridDimensionB_, buffer1);
            // Sort CAB->CBA
            permuteABCtoACB(buffer1, myGridDimensionC_, numKSumTermsA_, myGridDimensionB_, buffer2, nThreads_);
            // Transform A index
            contractABxCWithDxC<Real>(buffer2, compressionCoefficientsA_[0], myGridDimensionC_ * myGridDimensionB_,
                                      numKSumTermsA_, myGridDimensionA_, buffer1);
            // Make the grid dimensions the fast running indices again
            compressionCoefficientsA_.transposeInPlace();
            compressionCoefficientsB_.transposeInPlace();
            compressionCoefficientsC_.transposeInPlace();
            return buffer1;
        }

        /*!
         * \brief convolveE performs the convolution on a compressed PME transformed Grid
         * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
         * \return the reciprocal space energy.
         */
        Real convolveE(Real *transformedGrid) {
            updateInfluenceFunction();
            size_t nxz = (size_t)myNumKSumTermsA_ * myNumKSumTermsC_;
            size_t nyxz = myNumKSumTermsB_ * nxz;
            bool iAmNodeZero = (myNodeRankA_ == 0 && myNodeRankB_ == 0 && myNodeRankC_ == 0);
            Real *influenceFunction = cachedInfluenceFunction_.data();

            Real energy = 0;
            if (rPower_ > 3 && iAmNodeZero) {
                // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
                // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
                Real prefac = 2 * scaleFactor_ * HELPME_PI * HELPME_SQRTPI * pow(kappa_, rPower_ - 3) /
                              ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume());
                energy += prefac * transformedGrid[0] * transformedGrid[0];
            }
            // Zero the m=0 term (only once, on the origin node) so it does not enter the sum below.
            if (iAmNodeZero) transformedGrid[0] = 0;

            // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy) num_threads(nThreads_)
            for (size_t yxz = 0; yxz < nyxz; ++yxz) {
                energy += transformedGrid[yxz] * transformedGrid[yxz] * influenceFunction[yxz];
                transformedGrid[yxz] *= influenceFunction[yxz];
            }

            return energy / 2;
        }

        /*!
         * \brief convolveE performs the convolution of a standard PME transformed grid
         * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
         * \return the reciprocal space energy.
         */
        Real convolveE(Complex *transformedGrid) {
            updateInfluenceFunction();
            size_t nxz = (size_t)myNumKSumTermsA_ * myNumKSumTermsC_;
            size_t nyxz = myNumKSumTermsB_ * nxz;
            bool iAmNodeZero = (myNodeRankA_ == 0 && myNodeRankB_ == 0 && myNodeRankC_ == 0);
            Real *influenceFunction = cachedInfluenceFunction_.data();
            // Conventional PME stores only the first nx/2+1 x values of the Hermitian-symmetric grid,
            // so most terms must be double counted below.
            bool useConjugateSymmetry = algorithmType_ == AlgorithmType::PME;

            Real energy = 0;
            if (rPower_ > 3 && iAmNodeZero) {
                // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
                // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
                Real prefac = 2 * scaleFactor_ * HELPME_PI * HELPME_SQRTPI * pow(kappa_, rPower_ - 3) /
                              ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume());
                energy += prefac * std::norm(transformedGrid[0]);
            }
            // Zero the m=0 term (only once, on the origin node) so it does not enter the sum below.
            if (iAmNodeZero) transformedGrid[0] = Complex(0, 0);

            const size_t numCTerms(myNumKSumTermsC_);
#pragma omp parallel for reduction(+ : energy) num_threads(nThreads_)
            for (size_t yxz = 0; yxz < nyxz; ++yxz) {
                size_t xz = yxz % nxz;
                int kx = firstKSumTermA_ + xz / numCTerms;
                // We only loop over the first nx/2+1 x values; this
                // accounts for the "missing" complex conjugate values.
                Real permPrefac = useConjugateSymmetry && kx != 0 && kx != complexGridDimensionA_ - 1 ? 2 : 1;
                Real structFactorNorm = transformedGrid[yxz].real() * transformedGrid[yxz].real() +
                                        transformedGrid[yxz].imag() * transformedGrid[yxz].imag();
                energy += permPrefac * structFactorNorm * influenceFunction[yxz];
                transformedGrid[yxz] *= influenceFunction[yxz];
            }

            return energy / 2;
        }

        /*!
         * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial, for
         * the compressed PME algorithm.
         * \param transformedGrid the pointer to the Fourier space array holding the transformed grid in YXZ ordering.
         * \param convolvedGrid the (output) pointer to the Fourier space array holding the convolved grid in YXZ
         * ordering.
         * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
         * This vector is incremented, not assigned.
         * \return the reciprocal space energy.
         */
        Real convolveEV(const Real *transformedGrid, Real *&convolvedGrid, RealMat &virial) {
            // The convolution cannot be done in place when the virial is requested, so direct the
            // output into whichever workspace the input does not occupy.
            convolvedGrid = transformedGrid == reinterpret_cast<Real *>(workSpace1_.data())
                                ? reinterpret_cast<Real *>(workSpace2_.data())
                                : reinterpret_cast<Real *>(workSpace1_.data());
            return convolveEVCompressedFxn_(myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_, firstKSumTermA_,
                                            firstKSumTermB_, firstKSumTermC_, scaleFactor_, transformedGrid,
                                            convolvedGrid, recVecs_, cellVolume(), kappa_, &splineModA_[0],
                                            &splineModB_[0], &splineModC_[0], mValsA_.data(), mValsB_.data(),
                                            mValsC_.data(), virial, nThreads_);
        }

        /*!
         * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial, for
         * the conventional PME algorithm.
         * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
         * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
         * This vector is incremented, not assigned.
         * \return the reciprocal space energy.
         */
        Real convolveEV(Complex *transformedGrid, RealMat &virial) {
            return convolveEVFxn_(true, complexGridDimensionA_, myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_,
                                  firstKSumTermA_, firstKSumTermB_, firstKSumTermC_, scaleFactor_, transformedGrid,
                                  recVecs_, cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0],
                                  mValsA_.data(), mValsB_.data(), mValsC_.data(), virial, nThreads_);
        }

        /*!
         * \brief Probes the potential grid to get the forces.
Generally this shouldn't be called; * use the various computeE() methods instead. This is the faster version that uses * the filtered atom list and uses pre-computed splines. Therefore, the splineCache_ * member must have been updated via a call to filterAtomsAndBuildSplineCache() first. * * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * \param virial pointer to the virial vector if needed */ void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat &parameters, RealMat &forces, Real *virial = nullptr) { updateAngMomIterator(parameterAngMom + 1); int nComponents = nCartesian(parameterAngMom); int nForceComponents = nCartesian(parameterAngMom + 1); const Real *paramPtr = parameters[0]; // Find how many multiples of the cache line size are needed // to ensure that each thread hits a unique page. 
size_t nAtoms = std::accumulate(numAtomsPerThread_.begin(), numAtomsPerThread_.end(), 0); size_t rowSize = std::ceil(nForceComponents / cacheLineSizeInReals_) * cacheLineSizeInReals_; if (fractionalPhis_.nRows() < nAtoms || fractionalPhis_.nCols() < rowSize) { fractionalPhis_ = RealMat(nAtoms, rowSize); } RealMat fractionalParams; Real cartPhi[3]; if (parameterAngMom) { fractionalParams = cartesianTransform(parameterAngMom, false, scaledRecVecs_.transpose(), parameters); if (virial) { if (parameterAngMom > 1) { // The structure factor derivatives below are only implemented up to dipoles for now throw std::runtime_error("Only multipoles up to L=1 are supported if the virial is requested"); } } } #pragma omp parallel num_threads(nThreads_) { #ifdef _OPENMP int threadID = omp_get_thread_num(); #else int threadID = 0; #endif #pragma omp for for (size_t atom = 0; atom < nAtoms; ++atom) { const auto &cacheEntry = splineCache_[atom]; const auto &absAtom = cacheEntry.absoluteAtomNumber; const auto &splineA = cacheEntry.aSpline; const auto &splineB = cacheEntry.bSpline; const auto &splineC = cacheEntry.cSpline; if (parameterAngMom) { Real *myScratch = fractionalPhis_[threadID % nThreads_]; probeGridImpl(absAtom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC, myScratch, fractionalParams[absAtom], forces[absAtom]); // Add extra virial terms coming from the derivative of the structure factor. // See eq. 
2.16 of https://doi.org/10.1063/1.1630791 for details if (virial) { // Get the potential in the Cartesian basis matrixVectorProduct(scaledRecVecs_, &myScratch[1], &cartPhi[0]); const Real *parm = parameters[absAtom]; virial[0] += cartPhi[0] * parm[1]; virial[1] += 0.5f * (cartPhi[0] * parm[2] + cartPhi[1] * parm[1]); virial[2] += cartPhi[1] * parm[2]; virial[3] += 0.5f * (cartPhi[0] * parm[3] + cartPhi[2] * parm[1]); virial[4] += 0.5f * (cartPhi[1] * parm[3] + cartPhi[2] * parm[2]); virial[5] += cartPhi[2] * parm[3]; } } else { probeGridImpl(potentialGrid, splineA, splineB, splineC, paramPtr[absAtom], forces[absAtom]); } } } } /*! * \brief computeESlf computes the Ewald self interaction energy. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \return the self energy. */ Real computeESlf(int parameterAngMom, const RealMat &parameters) { assertInitialized(); auto prefac = slfEFxn_(parameterAngMom, kappa_, scaleFactor_); size_t nAtoms = parameters.nRows(); Real sumCoefs = 0; for (size_t atom = 0; atom < nAtoms; ++atom) { sumCoefs += parameters(atom, 0) * parameters(atom, 0); } return prefac * sumCoefs; } /*! * \brief computeEDir computes the direct space energy. This is provided mostly for debugging and testing * purposes; generally the host program should provide the pairwise interactions. \param pairList dense list of * atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... 
iN, jN. \param parameterAngMom the angular momentum of * the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the list of * parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter with * angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the * fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the direct space energy. */ Real computeEDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole direct terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * dirEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFDir computes the direct space energy and force. This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). 
\param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the direct space energy. */ Real computeEFDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVDir computes the direct space energy, force and virial. 
This is provided mostly for * debugging and testing purposes; generally the host program should provide the pairwise interactions. \param * pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom * the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param * parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). * For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the direct space energy. */ Real computeEFVDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. 
Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! * \brief computeEAdj computes the adjusted real space energy which extracts the energy for excluded pairs that * is present in reciprocal space. This is provided mostly for debugging and testing purposes; generally the * host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the adjusted energy. 
*/ Real computeEAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * adjEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFAdj computes the adjusted energy and force. This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. \param pairList dense * list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom the angular * momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the * list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter * with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and * the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the adjusted energy. 
*/ Real computeEFAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVAdj computes the adjusted energy, forces and virial. This is provided mostly for debugging * and testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. 
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the adjusted energy. */ Real computeEFVAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! 
* \brief Computes the full electrostatic potential at atomic sites due to point charges located at those same * sites. The site located at each probe location is neglected, to avoid the resulting singularity. * \param charges * the list of point charges (in e) associated with each particle. * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param potential the array holding the potential. This is * a matrix of dimensions nAtoms x 1. * \param sphericalCutoff the cutoff (in A) applied to the real space summations, * which must be no more than half of the box dimensions. */ void computePAtAtomicSites(const RealMat &charges, const RealMat &coordinates, RealMat &potential, Real sphericalCutoff) { sanityChecks(0, charges, coordinates); // The minumum image convention requires that the cutoff be less than half the minumum box width checkMinimumImageCutoff(sphericalCutoff); size_t nAtoms = coordinates.nRows(); // Direct space, using simple O(N^2) algorithm. This can be improved using a nonbonded list if needed. 
Real cutoffSquared = sphericalCutoff * sphericalCutoff; Real kappaSquared = kappa_ * kappa_; #pragma omp parallel for num_threads(nThreads_) for (size_t i = 0; i < nAtoms; ++i) { const auto &coordsI = coordinates.row(i); Real *phiPtr = potential[i]; for (size_t j = 0; j < nAtoms; ++j) { // No self interactions are included, to remove the singularity if (i == j) continue; Real qJ = charges[j][0]; const auto &coordsJ = coordinates.row(j); auto RIJ = minimumImageDeltaR(coordsI, coordsJ); Real rSquared = RIJ[0] * RIJ[0] + RIJ[1] * RIJ[1] + RIJ[2] * RIJ[2]; if (rSquared < cutoffSquared) { *phiPtr += scaleFactor_ * qJ * dirEFxn_(rSquared, kappaSquared); } } } // Reciprocal space term filterAtomsAndBuildSplineCache(0, coordinates); auto realGrid = spreadParameters(0, charges); Real *potentialGrid; if (algorithmType_ == AlgorithmType::PME) { auto gridAddress = forwardTransform(realGrid); convolveE(gridAddress); potentialGrid = inverseTransform(gridAddress); } else if (algorithmType_ == AlgorithmType::CompressedPME) { auto gridAddress = compressedForwardTransform(realGrid); convolveE(gridAddress); potentialGrid = compressedInverseTransform(gridAddress); } else { std::logic_error("Unknown algorithm in helpme::computePAtAtomicSites"); } #pragma omp parallel for num_threads(nThreads_) for (size_t atom = 0; atom < nAtoms; ++atom) { const auto &cacheEntry = splineCache_[atom]; const auto &absAtom = cacheEntry.absoluteAtomNumber; probeGridImpl(potentialGrid, 1, cacheEntry.aSpline, cacheEntry.bSpline, cacheEntry.cSpline, potential[absAtom]); } // Self term - back out the contribution from the atoms at each probe site Real prefac = slfEFxn_(0, kappa_, scaleFactor_); for (size_t atom = 0; atom < nAtoms; ++atom) { potential[atom][0] += 2 * prefac * charges[atom][0]; } } /* * \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives as * well as the volume dependent part of the virial that comes from the structure factor. 
         * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
         * quadrupoles, etc.). A negative value indicates that only the shell with |parameterAngMom| is to be
         * considered, e.g. a value of -2 specifies that only quadrupoles (and not dipoles or charges) will be provided;
         * the input matrix should have dimensions corresponding only to the number of terms in this shell.
         * \param parameters the list of parameters associated with each atom (charges, C6
         * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
         * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
         *
         * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
         *
         * i.e. generated by the python loops
         * \code{.py}
         * for L in range(maxAM+1):
         *     for Lz in range(0,L+1):
         *         for Ly in range(0, L - Lz + 1):
         *             Lx = L - Ly - Lz
         * \endcode
         * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
         * \param energy pointer to the variable holding the energy; this is incremented, not assigned.
         * \param gridPoints the list of grid points at which the potential is needed; can be the same as the
         * coordinates.
         * \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is
         * (minus) the field, etc. A negative value indicates that only the derivative with order |parameterAngMom|
         * is to be generated, e.g. -2 specifies that only the second derivative (not the potential or its gradient)
         * will be returned as output. The output matrix should have space for only these terms, accordingly.
         * \param potential the array holding the potential. This is a matrix of dimensions
         * nAtoms x nD, where nD is the derivative level requested. See the details fo the parameters argument for
         * information about ordering of derivative components. N.B. this array is incremented with the potential, not
         * assigned, so take care to zero it first if only the current results are desired.
         * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
         * This vector is incremented, not assigned.
         */
        // Thin forwarding wrapper: identical to computePRec but with a caller-supplied virial accumulator.
        void computePVRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates,
                          const RealMat &gridPoints, int derivativeLevel, RealMat &potential, RealMat &virial) {
            computePRecHelper(parameterAngMom, parameters, coordinates, gridPoints, derivativeLevel, potential, virial);
        }

        /*!
         * \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives.
         * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
         * quadrupoles, etc.). A negative value indicates that only the shell with |parameterAngMom| is to be
         * considered, e.g. a value of -2 specifies that only quadrupoles (and not dipoles or charges) will be provided;
         * the input matrix should have dimensions corresponding only to the number of terms in this shell.
         * \param parameters the list of parameters associated with each atom (charges, C6
         * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
         * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
         *
         * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
         *
         * i.e. generated by the python loops
         * \code{.py}
         * for L in range(maxAM+1):
         *     for Lz in range(0,L+1):
         *         for Ly in range(0, L - Lz + 1):
         *             Lx = L - Ly - Lz
         * \endcode
         * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
         * \param energy pointer to the variable holding the energy; this is incremented, not assigned.
         * \param gridPoints the list of grid points at which the potential is needed; can be the same as the
         * coordinates.
 * \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is
 * (minus) the field, etc. A negative value indicates that only the derivative with order |parameterAngMom|
 * is to be generated, e.g. -2 specifies that only the second derivative (not the potential or its gradient)
 * will be returned as output. The output matrix should have space for only these terms, accordingly.
 * \param potential the array holding the potential. This is a matrix of dimensions
 * nAtoms x nD, where nD is the derivative level requested. See the details of the parameters argument for
 * information about ordering of derivative components. N.B. this array is incremented with the potential, not
 * assigned, so take care to zero it first if only the current results are desired.
 */
void computePRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates,
                 const RealMat &gridPoints, int derivativeLevel, RealMat &potential) {
    // An empty (0x0) matrix signals to the helper that no virial accumulation is requested.
    RealMat emptyMatrix(0, 0);
    computePRecHelper(parameterAngMom, parameters, coordinates, gridPoints, derivativeLevel, potential,
                      emptyMatrix);
}

/*!
 * \brief Runs a PME reciprocal space calculation, computing energies.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 * quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6
 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
 * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 * i.e. generated by the python loops
 * \code{.py}
 * for L in range(maxAM+1):
 *     for Lz in range(0,L+1):
 *         for Ly in range(0, L - Lz + 1):
 *             Lx = L - Ly - Lz
 * \endcode
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \return the reciprocal space energy. */ Real computeERec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { sanityChecks(parameterAngMom, parameters, coordinates); filterAtomsAndBuildSplineCache(parameterAngMom, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); Real energy; if (algorithmType_ == AlgorithmType::PME) { auto gridAddress = forwardTransform(realGrid); energy = convolveE(gridAddress); } else if (algorithmType_ == AlgorithmType::CompressedPME) { auto gridAddress = compressedForwardTransform(realGrid); energy = convolveE(gridAddress); } else { std::logic_error("Unknown algorithm in helpme::computeERec"); } return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing energies and forces. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the reciprocal space energy. 
*/ Real computeEFRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); Real energy; if (algorithmType_ == AlgorithmType::PME) { auto gridAddress = forwardTransform(realGrid); energy = convolveE(gridAddress); auto potentialGrid = inverseTransform(gridAddress); probeGrid(potentialGrid, parameterAngMom, parameters, forces); } else if (algorithmType_ == AlgorithmType::CompressedPME) { auto gridAddress = compressedForwardTransform(realGrid); energy = convolveE(gridAddress); auto potentialGrid = compressedInverseTransform(gridAddress); probeGrid(potentialGrid, parameterAngMom, parameters, forces); } else { std::logic_error("Unknown algorithm in helpme::computeEFRec"); } return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing energies, forces and the virial. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. 
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFVRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); Real energy; if (algorithmType_ == AlgorithmType::PME) { auto gridAddress = forwardTransform(realGrid); energy = convolveEV(gridAddress, virial); auto potentialGrid = inverseTransform(gridAddress); probeGrid(potentialGrid, parameterAngMom, parameters, forces, virial[0]); } else if (algorithmType_ == AlgorithmType::CompressedPME) { auto gridAddress = compressedForwardTransform(realGrid); Real *convolvedGrid; energy = convolveEV(gridAddress, convolvedGrid, virial); auto potentialGrid = compressedInverseTransform(convolvedGrid); probeGrid(potentialGrid, parameterAngMom, parameters, forces); } else { std::logic_error("Unknown algorithm in helpme::computeEFVRec"); } return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing energies, forces and the virial. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). * \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... 
* * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param inducedDipoles the induced dipoles in the order {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param polarizationType the method used to converged the induced dipoles. * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFVRecIsotropicInducedDipoles(int parameterAngMom, const RealMat &parameters, const RealMat &inducedDipoles, PolarizationType polarizationType, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); if (parameterAngMom) throw std::runtime_error("Only point charges are allowed in computeEFVRecIsoPolarized() at the moment."); if (polarizationType != PolarizationType::Mutual) throw std::runtime_error("Only mutual (variation) optimized dipoles are supported at the moment."); size_t numAtoms = parameters.nRows(); // Get the potential and field from the permanent moments RealMat potential(numAtoms, 10); RealMat combinedMultipoles(numAtoms, 4); for (int atom = 0; atom < parameters.nRows(); ++atom) { combinedMultipoles[atom][0] = parameters[atom][0]; combinedMultipoles[atom][1] = inducedDipoles[atom][0]; combinedMultipoles[atom][2] = inducedDipoles[atom][1]; combinedMultipoles[atom][3] = inducedDipoles[atom][2]; } computePVRec(1, combinedMultipoles, coordinates, coordinates, 2, potential, virial); double energy = 0; Real *virialPtr = 
virial.begin(); Real &Vxx = virialPtr[0]; Real &Vxy = virialPtr[1]; Real &Vyy = virialPtr[2]; Real &Vxz = virialPtr[3]; Real &Vyz = virialPtr[4]; Real &Vzz = virialPtr[5]; for (int atom = 0; atom < numAtoms; ++atom) { const Real *dPhi = potential[atom]; double charge = parameters[atom][0]; Real *force = forces[atom]; double phi = dPhi[0]; double phiX = dPhi[1]; double phiY = dPhi[2]; double phiZ = dPhi[3]; double phiXX = dPhi[4]; double phiXY = dPhi[5]; double phiYY = dPhi[6]; double phiXZ = dPhi[7]; double phiYZ = dPhi[8]; double phiZZ = dPhi[9]; const Real *mu = inducedDipoles[atom]; energy += 0.5 * charge * phi; force[0] += phiXX * mu[0] + phiXY * mu[1] + phiXZ * mu[2]; force[1] += phiXY * mu[0] + phiYY * mu[1] + phiYZ * mu[2]; force[2] += phiXZ * mu[0] + phiYZ * mu[1] + phiZZ * mu[2]; force[0] += charge * phiX; force[1] += charge * phiY; force[2] += charge * phiZ; Vxx += phiX * mu[0]; Vxy += 0.5 * (phiX * mu[1] + phiY * mu[0]); Vyy += phiY * mu[1]; Vxz += 0.5 * (phiX * mu[2] + phiZ * mu[0]); Vyz += 0.5 * (phiY * mu[2] + phiZ * mu[1]); Vzz += phiZ * mu[2]; } return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing the energy. The direct space * implementation here is not totally optimal, so this routine should primarily be used for testing and * debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN,jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). 
For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the full PME energy. */ Real computeEAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeERec(parameterAngMom, parameters, coordinates); energy += computeESlf(parameterAngMom, parameters); energy += computeEDir(includedList, parameterAngMom, parameters, coordinates); energy += computeEAdj(excludedList, parameterAngMom, parameters, coordinates); return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies and forces. The direct * space implementation here is not totally optimal, so this routine should primarily be used for testing * and debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). 
\param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the full PME energy. */ Real computeEFAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeEFRec(parameterAngMom, parameters, coordinates, forces); energy += computeESlf(parameterAngMom, parameters); energy += computeEFDir(includedList, parameterAngMom, parameters, coordinates, forces); energy += computeEFAdj(excludedList, parameterAngMom, parameters, coordinates, forces); return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies, forces and virials. * The direct space implementation here is not totally optimal, so this routine should primarily * be used for testing and debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. 
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). * \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the full PME energy. */ Real computeEFVAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeEFVRec(parameterAngMom, parameters, coordinates, forces, virial); energy += computeESlf(parameterAngMom, parameters); energy += computeEFVDir(includedList, parameterAngMom, parameters, coordinates, forces, virial); energy += computeEFVAdj(excludedList, parameterAngMom, parameters, coordinates, forces, virial); return energy; } /*! * \brief setup initializes this object for a PME calculation using only threading. * This may be called repeatedly without compromising performance. 
 * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
 * dispersion).
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
 * \param dimA the dimension of the FFT grid along the A axis.
 * \param dimB the dimension of the FFT grid along the B axis.
 * \param dimC the dimension of the FFT grid along the C axis.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 * 1 / [4 pi epsilon0] for Coulomb calculations).
 * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
 * are used.
 */
void setup(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor,
           int nThreads) {
    // Conventional (uncompressed) PME: the K-sum limits coincide with the grid
    // dimensions, no MPI communicator (the 0 in the communicator slot), ZYX node
    // ordering, and a single node along each axis.
    setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, dimA, dimB, dimC, scaleFactor,
                             nThreads, 0, NodeOrder::ZYX, 1, 1, 1);
}

/*!
 * \brief setupCompressed initializes this object for a compressed PME calculation using only threading.
 * This may be called repeatedly without compromising performance.
 * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
 * dispersion).
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
 * \param dimA the dimension of the FFT grid along the A axis.
 * \param dimB the dimension of the FFT grid along the B axis.
 * \param dimC the dimension of the FFT grid along the C axis.
 * \param maxKA the maximum K value in the reciprocal sum along the A axis.
 * \param maxKB the maximum K value in the reciprocal sum along the B axis.
 * \param maxKC the maximum K value in the reciprocal sum along the C axis.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 * 1 / [4 pi epsilon0] for Coulomb calculations).
 * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
 * are used.
 */
void setupCompressed(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA, int maxKB,
                     int maxKC, Real scaleFactor, int nThreads) {
    // Compressed PME: the caller-supplied K-sum limits are honored instead of
    // defaulting to the grid dimensions; otherwise identical to setup().
    setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, maxKA, maxKB, maxKC, scaleFactor,
                             nThreads, 0, NodeOrder::ZYX, 1, 1, 1);
}

#if HAVE_MPI == 1
/*!
 * \brief setupParallel initializes this object for a conventional PME calculation using MPI parallelism
 * and threading. This may be called repeatedly without compromising performance.
 * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
 * dispersion).
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
 * \param dimA the dimension of the FFT grid along the A axis.
 * \param dimB the dimension of the FFT grid along the B axis.
 * \param dimC the dimension of the FFT grid along the C axis.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 * 1 / [4 pi epsilon0] for Coulomb calculations).
 * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
 * are used.
 * \param communicator the MPI communicator for the reciprocal space calculation, which should already be
 * initialized.
 * \param nodeOrder the ordering of the nodes within the MPI communicator.
 * \param numNodesA the number of nodes to be used for the A dimension.
 * \param numNodesB the number of nodes to be used for the B dimension.
 * \param numNodesC the number of nodes to be used for the C dimension.
 */
void setupParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor,
                   int nThreads, const MPI_Comm &communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB,
                   int numNodesC) {
    // Conventional PME over MPI: K-sum limits equal the grid dimensions; the
    // communicator is smuggled through the metadata call as an opaque pointer.
    setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, dimA, dimB, dimC, scaleFactor,
                             nThreads, (void *)&communicator, nodeOrder, numNodesA, numNodesB, numNodesC);
}

/*!
 * \brief setupCompressedParallel initializes this object for a compressed PME calculation using MPI parallelism
 * and threading. This may be called repeatedly without compromising performance.
 * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
 * dispersion).
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
 * \param dimA the dimension of the FFT grid along the A axis.
 * \param dimB the dimension of the FFT grid along the B axis.
 * \param dimC the dimension of the FFT grid along the C axis.
 * \param maxKA the maximum K value in the reciprocal sum along the A axis.
 * \param maxKB the maximum K value in the reciprocal sum along the B axis.
 * \param maxKC the maximum K value in the reciprocal sum along the C axis.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 * 1 / [4 pi epsilon0] for Coulomb calculations).
 * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
 * are used.
 * \param communicator the MPI communicator for the reciprocal space calculation, which should already be
 * initialized.
 * \param nodeOrder the ordering of the nodes within the MPI communicator.
 * \param numNodesA the number of nodes to be used for the A dimension.
 * \param numNodesB the number of nodes to be used for the B dimension.
 * \param numNodesC the number of nodes to be used for the C dimension.
 */
void setupCompressedParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA,
                             int maxKB, int maxKC, Real scaleFactor, int nThreads, const MPI_Comm &communicator,
                             NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) {
    // Compressed PME over MPI: caller-supplied K-sum limits, communicator passed
    // through the metadata call as an opaque pointer.
    setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, maxKA, maxKB, maxKC, scaleFactor,
                             nThreads, (void *)&communicator, nodeOrder, numNodesA, numNodesB, numNodesC);
}
#endif
};
}  // Namespace helpme

// Convenience aliases for the two supported precisions.
using PMEInstanceD = helpme::PMEInstance<double>;
using PMEInstanceF = helpme::PMEInstance<float>;

#else

// C header
#include <stddef.h>
#if HAVE_MPI == 1
#include <mpi.h>
#endif

typedef enum { Undefined = 0, XAligned = 1, ShapeMatrix = 2 } LatticeType;
typedef enum { /* Undefined comes from the above scope */ ZYX = 1 } NodeOrder;
typedef enum { Mutual = 0 } PolarizationType;

typedef struct PMEInstance PMEInstance;

/* Lifetime management: create/destroy a double- (D) or float- (F) precision instance. */
extern struct PMEInstance *helpme_createD();
extern struct PMEInstance *helpme_createF();
extern void helpme_destroyD(struct PMEInstance *pme);
extern void helpme_destroyF(struct PMEInstance *pme);

/* Threaded-only setup of conventional and compressed PME. */
extern void helpme_setupD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim, int bDim,
                          int cDim, double scaleFactor, int nThreads);
extern void helpme_setupF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim, int bDim,
                          int cDim, float scaleFactor, int nThreads);
extern void helpme_setup_compressedD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim,
                                     int bDim, int cDim, int maxKA, int maxKB, int maxKC, double scaleFactor,
                                     int nThreads);
extern void helpme_setup_compressedF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim,
                                     int bDim, int cDim, int maxKA, int maxKB, int maxKC, float scaleFactor,
                                     int nThreads);
#if HAVE_MPI == 1
/* MPI-parallel setup variants. */
extern void helpme_setup_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder, int dimA,
                                   int dimB, int dimC, double scaleFactor, int nThreads,
                                   MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB,
                                   int numNodesC);
extern void helpme_setup_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder, int dimA, int dimB,
                                   int dimC, float scaleFactor, int nThreads, MPI_Comm communicator,
                                   NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC);
extern void helpme_setup_compressed_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder,
                                              int dimA, int dimB, int dimC, int maxKA, int maxKB, int maxKC,
                                              double scaleFactor, int nThreads, MPI_Comm communicator,
                                              NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC);
extern void helpme_setup_compressed_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder,
                                              int dimA, int dimB, int dimC, int maxKA, int maxKB, int maxKC,
                                              float scaleFactor, int nThreads, MPI_Comm communicator,
                                              NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC);
#endif  // HAVE_MPI

/* NOTE(review): the fourth scalar after C is presumably the alpha lattice angle
 * (of the alpha/beta/gamma triple) but is named 'kappa' in these declarations --
 * confirm against the implementation before relying on the name. */
extern void helpme_set_lattice_vectorsD(struct PMEInstance *pme, double A, double B, double C, double kappa,
                                        double beta, double gamma, LatticeType latticeType);
extern void helpme_set_lattice_vectorsF(struct PMEInstance *pme, float A, float B, float C, float kappa,
                                        float beta, float gamma, LatticeType latticeType);

/* Reciprocal-space evaluation: energy (E), energy+forces (EF), energy+forces+virial (EFV). */
extern double helpme_compute_E_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom,
                                    double *parameters, double *coordinates);
extern float helpme_compute_E_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                   float *coordinates);
extern double helpme_compute_EF_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom,
                                     double *parameters, double *coordinates, double *forces);
extern float helpme_compute_EF_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                    float *coordinates, float *forces);
extern double helpme_compute_EFV_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom,
                                      double *parameters, double *coordinates, double *forces,
                                      double *virial);
extern float helpme_compute_EFV_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                     float *coordinates, float *forces, float *virial);

/* Reciprocal-space potential (and derivatives) probed at arbitrary grid points. */
extern void helpme_compute_P_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters,
                                  double *coordinates, size_t nGridPoints, double *gridPoints, int derivativeLevel,
                                  double *potential);
extern void helpme_compute_P_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                  float *coordinates, size_t nGridPoints, float *gridPoints, int derivativeLevel,
                                  float *potential);
#endif  // C++/C
#endif  // Header guard
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Numbers of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. 
  MutableArrayRef<OMPClause *> getClauses() {
    // The clauses live in trailing storage, ClausesOffset bytes past the start
    // of the (most-derived) directive object.
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        // Trailing clause storage begins at the end of the derived object,
        // aligned for OMPClause pointers.
        ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}

  /// Sets the list of clauses for this directive.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    // The associated statement is stored as the first child.
    *child_begin() = S;
  }

public:
  /// Iterates over expressions/statements used in the construct.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    // One-past-the-end of the clause range being traversed.
    ArrayRef<OMPClause *>::iterator End;
    // Current position / end within the current clause's used_children() range.
    OMPClause::child_iterator ChildI, ChildEnd;

    // If the current clause's children are exhausted, advance the outer clause
    // iterator to the next clause that has at least one used child (or to End).
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        // The first clause may itself have no used children; skip ahead.
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      // Current clause exhausted: step to the next clause and reset the
      // child range before normalizing with MoveToNext().
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    // The end iterator is built over an empty range anchored at Clauses.end().
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
template <typename SpecificClause>
class specific_clause_iterator
    : public llvm::iterator_adaptor_base<
          specific_clause_iterator<SpecificClause>,
          ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
          const SpecificClause *, ptrdiff_t, const SpecificClause *,
          const SpecificClause *> {
  ArrayRef<OMPClause *>::const_iterator End; // One past the last clause.

  /// Advance this->I past clauses that are not of type SpecificClause.
  void SkipToNextClause() {
    while (this->I != End && !isa<SpecificClause>(*this->I))
      ++this->I;
  }

public:
  explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
      : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
        End(Clauses.end()) {
    SkipToNextClause();
  }

  const SpecificClause *operator*() const {
    return cast<SpecificClause>(*this->I);
  }
  const SpecificClause *operator->() const { return **this; }

  specific_clause_iterator &operator++() {
    ++this->I;
    SkipToNextClause();
    return *this;
  }
};

/// Returns a range over only the clauses of kind SpecificClause in
/// \p Clauses.
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
  return {specific_clause_iterator<SpecificClause>(Clauses),
          specific_clause_iterator<SpecificClause>(
              llvm::makeArrayRef(Clauses.end(), 0))};
}

/// Convenience overload operating on this directive's own clause list.
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
  return getClausesOfKind<SpecificClause>(clauses());
}

/// Gets the single clause of the specified kind associated with the
/// current directive iff there is only one clause of this kind (an
/// assertion fires if more than one clause of this kind is associated
/// with the directive). Returns nullptr if no clause of this kind is
/// associated with the directive.
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
  auto Clauses = getClausesOfKind<SpecificClause>();

  if (Clauses.begin() != Clauses.end()) {
    assert(std::next(Clauses.begin()) == Clauses.end() &&
           "There are at least 2 clauses of the specified kind");
    return *Clauses.begin();
  }
  return nullptr;
}

/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
  auto Clauses = getClausesOfKind<SpecificClause>();
  return Clauses.begin() != Clauses.end();
}

/// Returns starting location of directive kind.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns ending location of directive.
SourceLocation getEndLoc() const { return EndLoc; }

/// Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

/// Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }

/// Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }

/// Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }

/// Returns statement associated with the directive.
/// The associated statement is stored as the first child.
const Stmt *getAssociatedStmt() const {
  assert(hasAssociatedStmt() && "no associated statement.");
  return *child_begin();
}
Stmt *getAssociatedStmt() {
  assert(hasAssociatedStmt() && "no associated statement.");
  return *child_begin();
}

/// Returns the captured statement associated with the
/// component region within the (combined) directive.
//
// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  assert(std::any_of(
             CaptureRegions.begin(), CaptureRegions.end(),
             [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
         "RegionKind not found in OpenMP CaptureRegions.");
  auto *CS = cast<CapturedStmt>(getAssociatedStmt());
  // Capture regions are nested in the order listed in CaptureRegions;
  // peel one CapturedStmt per region until the requested one is reached.
  for (auto ThisCaptureRegion : CaptureRegions) {
    if (ThisCaptureRegion == RegionKind)
      return CS;
    CS = cast<CapturedStmt>(CS->getCapturedStmt());
  }
  llvm_unreachable("Incorrect RegionKind specified for directive.");
}

/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
  assert(hasAssociatedStmt() && getAssociatedStmt() &&
         "Must have associated statement.");
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  assert(!CaptureRegions.empty() &&
         "At least one captured statement must be provided.");
  auto *CS = cast<CapturedStmt>(getAssociatedStmt());
  // Unwrap all but the innermost capture level.
  for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
    CS = cast<CapturedStmt>(CS->getCapturedStmt());
  return CS;
}

const CapturedStmt *getInnermostCapturedStmt() const {
  return const_cast<OMPExecutableDirective *>(this)
      ->getInnermostCapturedStmt();
}

OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

static bool classof(const Stmt *S) {
  return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
         S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}

child_range children() {
  if (!hasAssociatedStmt())
    return child_range(child_iterator(), child_iterator());
  // Children are stored in trailing storage right after the clause list.
  Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
  /// Do not mark all the special expression/statements as children, except
  /// for the associated statement.
  return child_range(ChildStorage, ChildStorage + 1);
}

const_child_range children() const {
  if (!hasAssociatedStmt())
    return const_child_range(const_child_iterator(), const_child_iterator());
  Stmt **ChildStorage = reinterpret_cast<Stmt **>(
      const_cast<OMPExecutableDirective *>(this)->getClauses().end());
  return const_child_range(ChildStorage, ChildStorage + 1);
}

ArrayRef<OMPClause *> clauses() { return getClauses(); }

ArrayRef<OMPClause *> clauses() const {
  return const_cast<OMPExecutableDirective *>(this)->getClauses();
}

/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;

/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive,
/// Prerequisite: Executable Directive must not be Standalone directive.
const Stmt *getStructuredBlock() const;

Stmt *getStructuredBlock() {
  return const_cast<Stmt *>(
      const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock());
}
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
/// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. 
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, five arrays of length CollapsedNum are
  /// allocated: loop counters, their private copies, inits, updates and
  /// final values (see the get*/set* accessors below).
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals arrays).
    DefaultEnd = 9,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 17,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    CombinedLowerBoundVariableOffset = 21,
    CombinedUpperBoundVariableOffset = 22,
    CombinedEnsureUpperBoundOffset = 23,
    CombinedInitOffset = 24,
    CombinedConditionOffset = 25,
    CombinedNextLowerBoundOffset = 26,
    CombinedNextUpperBoundOffset = 27,
    CombinedDistConditionOffset = 28,
    CombinedParForInDistConditionOffset = 29,
    // Offset to the end (and start of the following counters/updates/finals
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 30,
  };

  /// Get the counters storage.
  /// The five CollapsedNum-sized arrays live after the fixed children, at
  /// getArraysOffset(...), in the order: counters, private counters, inits,
  /// updates, finals.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
/// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                     // PrivateCounters, Inits,
                                                     // Updates and Finals
  }

  // The set* helpers below store the given expression into this directive's
  // child slot identified by the corresponding *Offset enumerator.  The
  // worksharing-only and combined-only slots assert that the directive kind
  // actually allocates them (see getArraysOffset).

  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedInitOffset) = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedConditionOffset) = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedParForInDistConditionOffset) =
        CombParForInDistCond;
  }
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    ///  with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    ///  with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };

  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the counters/finals/updates arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }

  // The get* helpers below read the child slot identified by the
  // corresponding *Offset enumerator; slots that exist only for
  // worksharing/taskloop/distribute kinds assert the directive kind first.

  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  Expr
  *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedInitOffset)));
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedConditionOffset)));
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextLowerBoundOffset)));
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextUpperBoundOffset)));
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedDistConditionOffset)));
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedParForInDistConditionOffset)));
  }
  const Stmt *getBody() const {
    // This relies on the loop form being already checked by Sema.
    const Stmt *Body =
        getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    Body = cast<ForStmt>(Body)->getBody();
    // Unwrap one nested for-statement per collapsed loop level.
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      Body = cast<ForStmt>(Body)->getBody();
    }
    return Body;
  }

  ArrayRef<Expr *> counters() { return getCounters(); }

  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }

  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }

  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }

  ArrayRef<Expr *> inits() { return getInits(); }

  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }

  ArrayRef<Expr *> updates() { return getUpdates(); }

  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }

  ArrayRef<Expr *> finals() { return getFinals(); }

  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. 
/// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. 
/// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. 
/// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. 
/// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. 
/// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. 
/// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. 
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};

/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// NOTE(review): 5 child slots; the setters/getters below address slots
  /// 1..4 as x, update-expr, v and expr, so slot 0 presumably holds the
  /// associated statement — confirm against OMPExecutableDirective.
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};

/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};

/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};

/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};

/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp cancellation point' directive.
/// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), CancelRegion(OMPD_unknown) {} /// Build an empty directive. /// explicit OMPCancellationPointDirective() : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, SourceLocation(), SourceLocation(), 0, 0), CancelRegion(OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. 
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Region (e.g. 'for', 'sections') this cancel applies to.
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};

/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'.
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'.
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'.
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'.
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         SourceLocation(),SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'.
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'.
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                           unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'.
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                      unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'.
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'.
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'.
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum,
                                               unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                        unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForDirectiveClass,
            OMPD_target_teams_distribute_parallel_for, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'.
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum,
                                                   unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
pi.c
#include <stdio.h>

/* Number of rectangles in the midpoint-rule integration of 4/(1+x^2)
 * over [0,1], whose exact value is pi. */
long long num_passos = 1000000000;
double passo;

/* Approximate pi with a midpoint-rule Riemann sum of 'steps' rectangles.
 *
 * Offloaded to a target device when compiled with OpenMP offloading
 * support; with plain compilation the pragmas are ignored and the loop
 * runs serially on the host.
 *
 * steps: number of rectangles (must be > 0).
 * returns: the pi approximation. */
static double compute_pi(long long steps)
{
    double soma = 0.0;
    double largura = 1.0 / (double)steps;
    /* The index must be as wide as 'steps': the original 'int i' would
     * overflow (undefined behavior / infinite loop) if the step count
     * were ever raised past INT_MAX. Declaring 'x' inside the loop makes
     * it private without needing a private(x) clause. */
    #pragma omp target map(tofrom:soma)
    #pragma omp teams distribute parallel for simd reduction(+:soma)
    for (long long i = 0; i < steps; i++) {
        double x = (i + 0.5) * largura;
        soma = soma + 4.0 / (1.0 + x * x);
    }
    return soma * largura;
}

int main(){
    /* 'passo' is kept updated for compatibility with the original globals. */
    passo = 1.0 / (double)num_passos;
    double pi = compute_pi(num_passos);
    printf("O valor de PI é: %f\n", pi);
    return 0;
}
flush.c
////////////////////////////////////////////////////////////// // // flush.c // // Copyright (c) 2017, Hassan Salehe Matar // All rights reserved. // // This file is part of Clanomp. For details, see // https://github.com/hassansalehe/Clanomp. Please also // see the LICENSE file for additional BSD notice // // Redistribution and use in source and binary forms, with // or without modification, are permitted provided that // the following conditions are met: // // * Redistributions of source code must retain the above // copyright notice, this list of conditions and the // following disclaimer. // // * Redistributions in binary form must reproduce the // above copyright notice, this list of conditions and // the following disclaimer in the documentation and/or // other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names // of its contributors may be used to endorse or promote // products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND // CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF // SUCH DAMAGE. 
// ////////////////////////////////////////////////////////////// // From the OpenMP specification: // // * Makes a thread’s temporary view of memory consistent with // memory and enforces an order on the memory operations of // the variables explicitly specified or implied // // * The binding thread set for a flush region is the encountering // thread. Execution of a flush region affects the memory and // the temporary view of memory of only the thread that executes // the region. It does not affect the temporary view of other // threads. Other threads must themselves execute a flush operation // in order to be guaranteed to observe the effects of the // encountering thread’s flush operation // // * A barrier also implies a flush // // References: // 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf // 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf #include <stdio.h> #include <omp.h> int main() { int count = 0; #pragma omp parallel shared(count) { #pragma omp flush(count) count++; #pragma omp flush(count) } printf("Value of count: %d, construct: <flush>\n", count); return 0; }
GrB_Scalar_wait.c
//------------------------------------------------------------------------------ // GrB_Scalar_wait: wait for a scalar to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a scalar, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Scalar_wait // finish all work on a scalar ( GrB_Scalar s, GrB_WaitMode waitmode ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE (s, "GrB_Scalar_wait (s, waitmode)") ; GB_RETURN_IF_NULL_OR_FAULTY (s) ; //-------------------------------------------------------------------------- // finish all pending work on the scalar //-------------------------------------------------------------------------- if (waitmode != GrB_COMPLETE && GB_ANY_PENDING_WORK (s)) { GrB_Info info ; GB_BURBLE_START ("GrB_Scalar_wait") ; GB_OK (GB_wait ((GrB_Matrix) s, "scalar", Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // GxB_Scalar_wait: wait for a scalar to complete (historical) //------------------------------------------------------------------------------ GrB_Info GxB_Scalar_wait // finish all work on a scalar ( GrB_Scalar *s ) { return (GrB_Scalar_wait (*s, GrB_MATERIALIZE)) ; }
c_print_results.c
/*****************************************************************/
/******     C  _  P R I N T _ R E S U L T S              ******/
/*****************************************************************/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Print the standard NAS Parallel Benchmark completion report to stdout:
 * benchmark name/class/size, timing and Mop/s figures, thread counts,
 * verification status, and the build configuration strings.
 * Pure output routine; no value is returned.
 */
void c_print_results( char *name,
    char class,
    int n1,
    int n2,
    int n3,
    int niter,
    double t,
    double mops,
    char *optype,
    int passed_verification,
    char *npbversion,
    char *compiletime,
    char *cc,
    char *clink,
    char *c_lib,
    char *c_inc,
    char *cflags,
    char *clinkflags )
{
    /* Both thread counts default to 1 for non-OpenMP builds. */
    int threads_used  = 1;
    int threads_avail = 1;

#ifdef _OPENMP
    threads_avail = omp_get_max_threads();
    /* a parallel region reveals how many threads actually execute */
    #pragma omp parallel shared(threads_used)
    {
        #pragma omp master
        threads_used = omp_get_num_threads();
    }
#endif

    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", class );

    if( n3 == 0 ) {
        /* 1-D or 2-D problem (as in IS): report the flattened size */
        long nn = n1;
        if ( n2 != 0 ) {
            nn *= n2;
        }
        printf( " Size = %12ld\n", nn );
    } else {
        printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 );
    }

    printf( " Iterations = %12d\n", niter );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Total threads = %12d\n", threads_used );
    printf( " Avail threads = %12d\n", threads_avail );
    if ( threads_used != threads_avail ) {
        printf( " Warning: Threads used differ from threads available\n" );
    }
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Mop/s/thread = %12.2f\n", mops / (double)threads_used );
    printf( " Operation type = %24s\n", optype );

    if ( passed_verification < 0 ) {
        printf( " Verification = NOT PERFORMED\n" );
    } else if ( passed_verification ) {
        printf( " Verification = SUCCESSFUL\n" );
    } else {
        printf( " Verification = UNSUCCESSFUL\n" );
    }

    printf( " Version = %12s\n", npbversion );
    printf( " Compile date = %12s\n", compiletime );

    printf( "\n Compile options:\n" );
    printf( " CC = %s\n", cc );
    printf( " CLINK = %s\n", clink );
    printf( " C_LIB = %s\n", c_lib );
    printf( " C_INC = %s\n", c_inc );
    printf( " CFLAGS = %s\n", cflags );
    printf( " CLINKFLAGS = %s\n", clinkflags );

    printf( "\n--------------------------------------\n" );
    printf( " Please send all errors/feedbacks to:\n" );
    printf( " Center for Manycore Programming\n" );
    printf( " cmp@aces.snu.ac.kr\n" );
    printf( " http://aces.snu.ac.kr\n" );
    printf( "--------------------------------------\n" );
}
DetailedPlaceDB.h
/**
 * @file DetailedPlaceDB.h
 * @author Yibo Lin
 * @date Jan 2019
 */
#ifndef _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H
#define _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H

#include "utility/src/Msg.h"
#include "utility/src/Box.h"
#include "legality_check/src/legality_check.h"
#include "draw_place/src/draw_place.h"

DREAMPLACE_BEGIN_NAMESPACE

/// @brief Horizontal span of free space in a placement row.
template <typename T>
struct Space
{
    T xl;   ///< left edge
    T xh;   ///< right edge
};

/// @brief Position of a node inside the bin-to-node map.
struct BinMapIndex
{
    int bin_id;   ///< which bin
    int sub_id;   ///< index within that bin's node list
};

/// @brief Position of a node inside the row-to-node map.
struct RowMapIndex
{
    int row_id;   ///< which row
    int sub_id;   ///< index within that row's node list
};

/// @brief a wrapper class of required data for detailed placement
/// All pointer members are non-owning views into externally managed arrays.
template <typename T>
struct DetailedPlaceDB
{
    typedef T type;
    const T* init_x;
    const T* init_y;
    const T* node_size_x;
    const T* node_size_y;
    const T* flat_region_boxes; ///< number of boxes x 4
    const int* flat_region_boxes_start; ///< number of regions + 1
    const int* node2fence_region_map; ///< length of number of movable cells
    T* x;   ///< current x locations (mutable)
    T* y;   ///< current y locations (mutable)
    const int* flat_net2pin_map;
    const int* flat_net2pin_start_map;
    const int* pin2net_map;
    const int* flat_node2pin_map;
    const int* flat_node2pin_start_map;
    const int* pin2node_map;
    const T* pin_offset_x;
    const T* pin_offset_y;
    const unsigned char* net_mask;  ///< nonzero entries mark nets considered in optimization
    T xl;
    T yl;
    T xh;
    T yh;
    T site_width;
    T row_height;
    T bin_size_x;
    T bin_size_y;
    int num_bins_x;
    int num_bins_y;
    int num_sites_x;
    int num_sites_y;
    int num_nodes;
    int num_movable_nodes;
    int num_pins;
    int num_nets;
    int num_regions; ///< number of regions for flat_region_boxes and flat_region_boxes_start

    /// @brief map x coordinate to site index, clamped to [0, num_sites_x-1]
    /// (the float-to-int assignment truncates toward zero)
    inline int pos2site_x(T xx) const
    {
        int sx = (xx-xl)/site_width;
        sx = std::max(sx, 0);
        sx = std::min(sx, num_sites_x-1);
        return sx;
    }
    /// @brief map y coordinate to site row index, clamped to [0, num_sites_y-1]
    inline int pos2site_y(T yy) const
    {
        int sy = (yy-yl)/row_height;
        sy = std::max(sy, 0);
        sy = std::min(sy, num_sites_y-1);
        return sy;
    }
    /// @brief site index as an upper bound
    inline int pos2site_ub_x(T xx) const
    {
        int sx = ceil((xx-xl)/site_width);
        sx = std::max(sx, 1);
        sx = std::min(sx, num_sites_x);
        return sx;
    }
    /// @brief site index as an upper bound
    inline int pos2site_ub_y(T yy) const
    {
        int sy = ceil((yy-yl)/row_height);
        sy = std::max(sy, 1);
        sy = std::min(sy, num_sites_y);
        return sy;
    }
    /// @brief map x coordinate to bin index, clamped to [0, num_bins_x-1]
    inline int pos2bin_x(T xx) const
    {
        int bx = (xx-xl)/bin_size_x;
        bx = std::max(bx, 0);
        bx = std::min(bx, num_bins_x-1);
        return bx;
    }
    /// @brief map y coordinate to bin index, clamped to [0, num_bins_y-1]
    inline int pos2bin_y(T yy) const
    {
        int by = (yy-yl)/bin_size_y;
        by = std::max(by, 0);
        by = std::min(by, num_bins_y-1);
        return by;
    }
    /// @brief clamp a box so every edge lies inside the layout region
    inline void shift_box_to_layout(Box<T>& box) const
    {
        box.xl = std::max(box.xl, xl);
        box.xl = std::min(box.xl, xh);
        box.xh = std::max(box.xh, xl);
        box.xh = std::min(box.xh, xh);
        box.yl = std::max(box.yl, yl);
        box.yl = std::min(box.yl, yh);
        box.yh = std::max(box.yh, yl);
        box.yh = std::min(box.yh, yh);
    }
    /// @brief convert a coordinate box to a site-index box
    inline Box<int> box2sitebox(const Box<T>& box) const
    {
        // xh, yh are exclusive
        Box<int> sitebox (
                pos2site_x(box.xl),
                pos2site_y(box.yl),
                pos2site_ub_x(box.xh),
                pos2site_ub_y(box.yh)
                );
        return sitebox;
    }
    /// @brief convert a coordinate box to a bin-index box (both ends inclusive)
    inline Box<int> box2binbox(const Box<T>& box) const
    {
        Box<int> binbox (
                pos2bin_x(box.xl),
                pos2bin_y(box.yl),
                pos2bin_x(box.xh),
                pos2bin_y(box.yh)
                );
        return binbox;
    }
    /// @brief align x coordinate to site
    inline T align2site(T xx) const
    {
        return floor((xx-xl)/site_width)*site_width+xl;
    }
    /// @brief compute optimal region for a cell
    /// The method to compute optimal region ignores the pin offsets of the target cell.
    /// If we want to consider the pin offsets, there may not be feasible box for the optimal region.
    /// Thus, this is just an approximate optimal region.
    /// When using the optimal region, one needs to refer to the center of the cell to the region, or the region completely covers the entire cell.
    Box<T> compute_optimal_region(int node_id) const
    {
        // start from an inverted (empty) box and expand over all pins of
        // all masked nets incident to node_id, excluding the node's own pins
        Box<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        for (int node2pin_id = flat_node2pin_start_map[node_id]; node2pin_id < flat_node2pin_start_map[node_id+1]; ++node2pin_id)
        {
            int node_pin_id = flat_node2pin_map[node2pin_id];
            int net_id = pin2net_map[node_pin_id];
            if (net_mask[net_id])
            {
                for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
                {
                    int net_pin_id = flat_net2pin_map[net2pin_id];
                    int other_node_id = pin2node_map[net_pin_id];
                    if (node_id != other_node_id)
                    {
                        box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
                        box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
                    }
                }
            }
        }
        shift_box_to_layout(box);
        return box;
    }
    /// @brief compute HPWL for a net
    T compute_net_hpwl(int net_id) const
    {
        Box<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
        {
            int net_pin_id = flat_net2pin_map[net2pin_id];
            int other_node_id = pin2node_map[net_pin_id];
            box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
            box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
        }
        // a net with no pins keeps the inverted sentinel box; report 0
        if (box.xl == std::numeric_limits<T>::max() || box.yl == std::numeric_limits<T>::max())
        {
            return (T)0;
        }
        return (box.xh-box.xl) + (box.yh-box.yl);
    }
    /// @brief compute HPWL for all nets
    /// NOTE: net_mask is deliberately ignored here (see commented line),
    /// so ALL nets contribute to the total.
    T compute_total_hpwl() const
    {
        //dreamplacePrint(kDEBUG, "start compute_total_hpwl\n");
        T total_hpwl = 0;
        for (int net_id = 0; net_id < num_nets; ++net_id)
        {
            //if (net_mask[net_id])
            {
                total_hpwl += compute_net_hpwl(net_id);
            }
        }
        //dreamplacePrint(kDEBUG, "end compute_total_hpwl\n");
        return total_hpwl;
    }
    /// @brief distribute cells to rows
    /// @param vx cell x locations
    /// @param vy cell y locations
    /// @param row2node_map output; must already be sized to num_sites_y rows
    /// @param num_threads OpenMP thread count for the per-row sorting phase
    void make_row2node_map(const T* vx, const T* vy, std::vector<std::vector<int> >& row2node_map, int num_threads) const
    {
        // distribute cells to rows
        for (int i = 0; i < num_nodes; ++i)
        {
            //T node_xl = vx[i];
            T node_yl = vy[i];
            //T node_xh = node_xl+node_size_x[i];
            T node_yh = node_yl+node_size_y[i];

            int row_idxl = (node_yl-yl)/row_height;
            int row_idxh = ceil((node_yh-yl)/row_height)+1;
            row_idxl = std::max(row_idxl, 0);
            row_idxh = std::min(row_idxh, num_sites_y);

            for (int row_id = row_idxl; row_id < row_idxh; ++row_id)
            {
                T row_yl = yl+row_id*row_height;
                T row_yh = row_yl+row_height;

                if (node_yl < row_yh && node_yh > row_yl) // overlap with row
                {
                    row2node_map[row_id].push_back(i);
                }
            }
        }

        // sort cells within rows
        // it is safer to sort by center
        // sometimes there might be cells with 0 sizes
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads) schedule(dynamic, 1)
#endif
        for (int i = 0; i < num_sites_y; ++i)
        {
            auto& row2nodes = row2node_map[i];
            // sort cells within rows according to left edges
            std::sort(row2nodes.begin(), row2nodes.end(),
                    [&] (int node_id1, int node_id2) {
                        T x1 = vx[node_id1];
                        T x2 = vx[node_id2];
                        return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                    });
            // After sorting by left edge,
            // there is a special case for fixed cells where
            // one fixed cell is completely within another in a row.
            // This will cause failure to detect some overlaps.
            // We need to remove the "small" fixed cell that is inside another.
            if (!row2nodes.empty())
            {
                std::vector<int> tmp_nodes;
                tmp_nodes.reserve(row2nodes.size());
                tmp_nodes.push_back(row2nodes.front());
                for (int j = 1, je = row2nodes.size(); j < je; ++j)
                {
                    int node_id1 = row2nodes.at(j-1);
                    int node_id2 = row2nodes.at(j);
                    // two fixed cells
                    if (node_id1 >= num_movable_nodes && node_id2 >= num_movable_nodes)
                    {
                        T xl1 = vx[node_id1];
                        T xl2 = vx[node_id2];
                        T width1 = node_size_x[node_id1];
                        T width2 = node_size_x[node_id2];
                        T xh1 = xl1 + width1;
                        T xh2 = xl2 + width2;
                        // only collect node_id2 if its right edge is righter than node_id1
                        if (xh1 < xh2)
                        {
                            tmp_nodes.push_back(node_id2);
                        }
                    }
                    else
                    {
                        tmp_nodes.push_back(node_id2);
                    }
                }
                row2nodes.swap(tmp_nodes);

                // sort according to center
                std::sort(row2nodes.begin(), row2nodes.end(),
                        [&] (int node_id1, int node_id2) {
                            T x1 = vx[node_id1] + node_size_x[node_id1]/2;
                            T x2 = vx[node_id2] + node_size_x[node_id2]/2;
                            return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                        });
            }
        }
    }
    /// @brief distribute movable cells to bins
    /// Bins are addressed column-major: bin_id = bx*num_bins_y + by.
    /// @param bin2node_map output; must already be sized to num_bins_x*num_bins_y
    /// @param node2bin_map output; must already be sized to num_movable_nodes
    void make_bin2node_map(const T* host_x, const T* host_y, const T* host_node_size_x, const T* host_node_size_y, std::vector<std::vector<int> >& bin2node_map, std::vector<BinMapIndex>& node2bin_map) const
    {
        // construct bin2node_map
        for (int i = 0; i < num_movable_nodes; ++i)
        {
            int node_id = i;
            // bin membership is decided by the cell center
            T node_x = host_x[node_id] + host_node_size_x[node_id]/2;
            T node_y = host_y[node_id] + host_node_size_y[node_id]/2;

            int bx = std::min(std::max((int)((node_x-xl)/bin_size_x), 0), num_bins_x-1);
            int by = std::min(std::max((int)((node_y-yl)/bin_size_y), 0), num_bins_y-1);
            int bin_id = bx*num_bins_y+by;
            //int sub_id = bin2node_map.at(bin_id).size();
            bin2node_map.at(bin_id).push_back(node_id);
        }
        // construct node2bin_map
        for (unsigned int bin_id = 0; bin_id < bin2node_map.size(); ++bin_id)
        {
            for (unsigned int sub_id = 0; sub_id < bin2node_map[bin_id].size(); ++sub_id)
            {
                int node_id = bin2node_map[bin_id][sub_id];
                BinMapIndex& bm_idx = node2bin_map.at(node_id);
                bm_idx.bin_id = bin_id;
                bm_idx.sub_id = sub_id;
            }
        }
#ifdef DEBUG
        int max_num_nodes_per_bin = 0;
        for (unsigned int i = 0; i < bin2node_map.size(); ++i)
        {
            max_num_nodes_per_bin = std::max(max_num_nodes_per_bin, (int)bin2node_map[i].size());
        }
        printf("[D] max_num_nodes_per_bin = %d\n", max_num_nodes_per_bin);
#endif
    }
    /// @brief check whether placement is legal
    bool check_legality() const
    {
        return legalityCheckKernelCPU(
                x, y,
                node_size_x, node_size_y,
                flat_region_boxes, flat_region_boxes_start, node2fence_region_map,
                xl, yl, xh, yh,
                site_width, row_height,
                num_nodes,
                num_movable_nodes,
                num_regions
                );
    }
    /// @brief check whether a cell is within its fence region
    /// @param node_id cell index
    /// @param xx, yy candidate lower-left location for the cell
    /// @return true if the cell area is fully covered by its region's boxes,
    ///         or if the cell has no fence region (region_id >= num_regions)
    bool inside_fence(int node_id, T xx, T yy) const
    {
        T node_xl = xx;
        T node_yl = yy;
        T node_xh = node_xl + node_size_x[node_id];
        T node_yh = node_yl + node_size_y[node_id];

        bool legal_flag = true;
        int region_id = node2fence_region_map[node_id];
        if (region_id < num_regions)
        {
            int box_bgn = flat_region_boxes_start[region_id];
            int box_end = flat_region_boxes_start[region_id + 1];
            T node_area = (node_xh - node_xl) * (node_yh - node_yl);
            // I assume there is no overlap between boxes of a region
            // otherwise, preprocessing is required
            for (int box_id = box_bgn; box_id < box_end; ++box_id)
            {
                int box_offset = box_id*4;
                T box_xl = flat_region_boxes[box_offset];
                T box_yl = flat_region_boxes[box_offset + 1];
                T box_xh = flat_region_boxes[box_offset + 2];
                T box_yh = flat_region_boxes[box_offset + 3];

                T dx = std::max(std::min(node_xh, box_xh) - std::max(node_xl, box_xl), (T)0);
                T dy = std::max(std::min(node_yh, box_yh) - std::max(node_yl, box_yl), (T)0);
                T overlap = dx*dy;
                if (overlap > 0)
                {
                    node_area -= overlap;
                }
            }
            if (node_area > 0) // not consumed by boxes within a region
            {
                legal_flag = false;
            }
        }
        return legal_flag;
    }
    /// @brief draw placement
    void draw_place(const char* filename) const
    {
        drawPlaceLauncher<T>(
                x, y,
                node_size_x, node_size_y,
                pin_offset_x, pin_offset_y,
                pin2node_map,
                num_nodes,
                num_movable_nodes,
                0,
                flat_net2pin_start_map[num_nets],
                xl, yl, xh, yh,
                site_width, row_height,
                bin_size_x, bin_size_y,
                filename
                );
    }
};

DREAMPLACE_END_NAMESPACE

#endif
seqReduction.c
#include <omp.h>
#include <stdio.h>

#define SIZE 2000000

/* BUGFIX: the two 2,000,000-element float arrays (~8 MB each) were
 * automatic locals in main, overflowing the typical 8 MB thread stack.
 * Static storage keeps them off the stack with no interface change. */
static float a[SIZE], b[SIZE];

/* Sequential baseline for a dot-product reduction: fills a[i] = i and
 * b[i] = 2*i, accumulates sum(a[i]*b[i]), and prints the result.
 * (The OpenMP reduction pragma is intentionally left commented out;
 * this is the serial reference version.) */
int main ()
{
    int i, n, chunk;
    float result;

    /* Some initializations */
    n = SIZE;
    chunk = 10;            /* used by the commented-out schedule clause */
    result = 0.0;
    for (i=0; i < n; i++) {
        a[i] = i * 1.0;
        b[i] = i * 2.0;
    }

    // #pragma omp parallel for \
    //  default(shared) private(i) \
    //  schedule(static,chunk) \
    //  reduction(+:result)
    for (i=0; i < n; i++)
        result = result + (a[i] * b[i]);

    printf("Final result= %f\n",result);
    return 0;              /* BUGFIX: implicit-int main() is invalid since C99 */
}
util.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "thd_info.h"
#include "util.h"

#include <omp.h>

/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Random value in [-1, 1); sign chosen by a second rand() draw. */
val_t rand_val(void)
{
  /* TODO: modify this to work based on the size of idx_t */
  val_t v = 1.0 * ((val_t) rand() / (val_t) RAND_MAX);
  if(rand() % 2 == 0) {
    v *= -1;
  }
  return v;
}

/* Random index wider than a single rand() draw (two draws combined). */
idx_t rand_idx(void)
{
  /* TODO: modify this to work based on the size of idx_t */
  return (idx_t) (rand() << 16) | rand();
}

/* Fill vals[0..nelems) with random values from rand_val(). */
void fill_rand(
  val_t * const restrict vals,
  idx_t const nelems)
{
  for(idx_t i=0; i < nelems; ++i) {
    vals[i] = rand_val();
  }
}

/* Human-readable byte count ("1.50MB"); caller frees the returned string.
 * Returns NULL if formatting fails. */
char * bytes_str(
  size_t const bytes)
{
  double size = (double)bytes;
  int suff = 0;
  const char *suffix[5] = {"B", "KB", "MB", "GB", "TB"};
  /* BUGFIX: bound by the LAST VALID index (4), not the array length (5).
   * The old 'suff < 5' condition let 'suff' reach 5 after the increment,
   * reading past the end of 'suffix' for byte counts above 1024^5. */
  while(size > 1024 && suff < 4) {
    size /= 1024.;
    ++suff;
  }

  char * ret = NULL;
  if(asprintf(&ret, "%0.2f%s", size, suffix[suff]) == -1) {
    fprintf(stderr, "SPLATT: asprintf failed with %zu bytes.\n", bytes);
    ret = NULL;
  }
  return ret;
}

/* Index of the largest element in arr[0..N); first occurrence wins. */
idx_t argmax_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t mkr = 0;
  for(idx_t i=1; i < N; ++i) {
    if(arr[i] > arr[mkr]) {
      mkr = i;
    }
  }
  return mkr;
}

/* Index of the smallest element in arr[0..N); first occurrence wins. */
idx_t argmin_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t mkr = 0;
  for(idx_t i=1; i < N; ++i) {
    if(arr[i] < arr[mkr]) {
      mkr = i;
    }
  }
  return mkr;
}

/* Prime factorization of N by trial division. Returns a heap array of
 * factors (caller frees) and stores the count in *nprimes.
 * Returns NULL with *nprimes == 0 if a reallocation fails. */
int * get_primes(
  int N,
  int * nprimes)
{
  int size = 10;
  int * p = (int *) splatt_malloc(size * sizeof(int));
  int np = 0;
  while(N != 1) {
    int i;
    for(i=2; i <= N; ++i) {
      if(N % i == 0) {
        /* found the next prime */
        break;
      }
    }

    /* grow if necessary */
    if(size == np) {
      /* BUGFIX: the old code never updated 'size' after realloc, so the
       * 'size == np' test could never fire again and the 21st factor
       * wrote past the end of the buffer. Also keep the old pointer on
       * failure instead of leaking/crashing on an unchecked realloc. */
      int * tmp = (int *) realloc(p, size * 2 * sizeof(int));
      if(tmp == NULL) {
        fprintf(stderr, "SPLATT: realloc failed in get_primes.\n");
        free(p);
        *nprimes = 0;
        return NULL;
      }
      p = tmp;
      size *= 2;
    }

    p[np++] = i;
    N /= i;
  }

  *nprimes = np;
  return p;
}

/* Parallel memcpy: each thread copies a contiguous slice of 'bytes'.
 * dst and src must not overlap (restrict). */
void par_memcpy(
  void * const restrict dst,
  void const * const restrict src,
  size_t const bytes)
{
  #pragma omp parallel
  {
    int nthreads = splatt_omp_get_num_threads();
    int tid = splatt_omp_get_thread_num();

    size_t n_per_thread = (bytes + nthreads - 1)/nthreads;
    size_t n_begin = SS_MIN(n_per_thread * tid, bytes);
    size_t n_end = SS_MIN(n_begin + n_per_thread, bytes);

    memcpy((char *)dst + n_begin, (char *)src + n_begin, n_end - n_begin);
  }
}
lrthresh_intel.c
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <limits.h> #include <complex.h> #include <mkl.h> #include <math.h> #include <omp.h> #include <sys/time.h> void mysvthresh(complex float *buf, MKL_INT _M, MKL_INT _N, float *s, complex float *u, complex float *vt, complex float *work, MKL_INT lwork, float *rwork, MKL_INT info, float lambda) { complex float alpha = 1.0f; complex float beta = 0.0f; csyrk("L", "T", &_N, &_M, &alpha, buf, &_M, &beta, vt, &_N); float s_upperbound = 0; for (int i = 0; i < _N; i++) { float s = 0; for (int j = 0; j < _N; j++) { int row = (i > j ? i : j); int col = (i < j ? i : j); s += cabsf(vt[row + col * _N]); } s_upperbound = (s_upperbound > s) ? s_upperbound : s; } if (s_upperbound < lambda * lambda) { for (int bj = 0; bj < _N; bj++) { #pragma simd for (int bi = 0; bi < _M; bi++) { buf[bi + bj * _M] = 0.; } } return; } cgesvd("S", "S", &_M, &_N, buf, &_M, s, u, &_M, vt, &_N, work, &lwork, rwork, &info); for (int bi = 0; bi < _N; bi++) { float sf = s[bi]; for (int bj = 0; bj < _N; bj++) { vt[bi + bj * _N] *= (sf < lambda) ? 0.f : sf - lambda; } } cgemm("N", "N", &_M, &_N, &_N, &alpha, u, &_M, vt, &_N, &beta, buf, &_M); } void qrmysvthresh(complex float *buf, MKL_INT _M, MKL_INT _N, float *s, complex float *u, complex float *vt, complex float *q, complex float *tau, complex float *r, complex float *work, MKL_INT lwork, float *rwork, MKL_INT info, float lambda) { complex float alpha = 1.0f; complex float beta = 0.0f; complex float zerocheck = 0.; for(int i = 0 ; i < _M*_N ; i++) { zerocheck += buf[i]; } if (zerocheck == 0.) { for (int bj = 0; bj < _N; bj++) { #pragma simd for (int bi = 0; bi < _M; bi++) { buf[bi + bj * _M] = 0.; } } return; } // 1. QR of A, and get R // Init R to zeroes for (int i = 0; i < _N; ++i) for (int j = 0; j < _N; ++j) r[i + j * _N] = .0; clacpy("N", &_M, &_N, buf, &_M, q, &_M); cgeqrf(&_M, &_N, q, &_M, tau, work, &lwork, &info); clacpy("U", &_M, &_N, q, &_M, r, &_N); // 2. 
Syrk to check for early stop csyrk("L", "T", &_N, &_N, &alpha, r, &_N, &beta, buf, &_N); float s_upperbound = 0; for (int i = 0; i < _N; i++) { float s = 0; for (int j = 0; j < _N; j++) { int row = (i > j ? i : j); int col = (i < j ? i : j); s += cabsf(buf[row + col * _N]); } s_upperbound = (s_upperbound > s) ? s_upperbound : s; } if (s_upperbound < lambda * lambda) { for (int bj = 0; bj < _N; bj++) { #pragma simd for (int bi = 0; bi < _M; bi++) { buf[bi + bj * _M] = 0.; } } return; } // 3. SVD of R cgesvd("S", "S", &_N, &_N, r, &_N, s, u, &_N, vt, &_N, work, &lwork, rwork, &info); // 4. THR for (int bi = 0; bi < _N; bi++) { float sf = s[bi]; for (int bj = 0; bj < _N; bj++) { vt[bi + bj * _N] *= (sf < lambda) ? 0.f : sf - lambda; } } // 5. GEMM with USV and last with Q for (int bj = 0; bj < _N; bj++) { #pragma simd for (int bi = 0; bi < _M; bi++) buf[bi + bj * _M] = .0; } cgemm("N", "N", &_N, &_N, &_N, &alpha, u, &_N, vt, &_N, &beta, buf, &_M); cunmqr("L", "N", &_M, &_N, &_N, q, &_M, tau, buf, &_M, work, &lwork, &info); } void mylrthresh(const complex float *mat1, complex float *mat2, float lambda, int M, int N, int nimg, int nmap, int blksize, int shift0, int shift1) { #pragma omp parallel { complex float *buf = (complex float *)malloc(blksize * blksize * nimg * sizeof(complex float)); MKL_INT _M = blksize * blksize; MKL_INT _N = nimg; complex float worksize; MKL_INT lwork = -1; MKL_INT info = 0; float *s = (float *)malloc(_N * sizeof(float)); complex float *u = (complex float *)malloc(_M * _N * sizeof(complex float)); complex float *vt = (complex float *)malloc(_N * _N * sizeof(complex float)); complex float *u_qr = (complex float *)malloc(_N * _N * sizeof(complex float)); complex float *q = (complex float *)malloc(_M * _N * sizeof(complex float)); complex float *r = (complex float *)malloc(_N * _N * sizeof(complex float)); complex float *tau = (complex float *)malloc(_N * sizeof(complex float)); cgesvd("S", "S", &_M, &_N, buf, &_M, s, u, &_M, vt, &_N, &worksize, 
&lwork, NULL, &info); lwork = (MKL_INT)worksize; complex float *work = (complex float *)malloc(lwork * sizeof(complex float)); float *rwork = (float *)malloc(_N * sizeof(float)); int Mpad = blksize * ((M + blksize - 1) / blksize); int Npad = blksize * ((N + blksize - 1) / blksize); for (int m = 0; m < nmap; m++) { #pragma omp for collapse(2) for (int i = 0 ; i < M; i += blksize) { for (int j = 0 ; j < N; j += blksize) { int shiftedi = i - shift0; int shiftedj = j - shift1; if ((shiftedi >= 0 ) && (shiftedj >=0 ) && (shiftedi + blksize <= M) && (shiftedj + blksize <= N)) { for (int img = 0; img < nimg; img++) { for (int bi = 0; bi < blksize; bi++) { #pragma simd for (int bj = 0; bj < blksize; bj++) { buf[bj + bi * blksize + img * blksize * blksize] = mat1 [shiftedj + bj + (shiftedi + bi) * N + m * M * N + img * nmap * M * N]; } } } } else { for (int img = 0; img < nimg; img++) { for (int bi = 0; bi < blksize; bi++) { for (int bj = 0; bj < blksize; bj++) { int bii = (shiftedi + bi); if(bii < 0) bii = Mpad+bii; bii = bii % M; int bjj = (shiftedj + bj); if(bjj < 0) bjj = Npad+bjj; bjj = bjj % N; buf[bj + bi * blksize + img * blksize * blksize] = mat1[bjj + bii * N + m * M * N + img * nmap * M * N]; } } } } mysvthresh(buf, _M, _N, s, u, vt, work, lwork, rwork, info, lambda); //qrmysvthresh(_buf, _M, _N, _s, _u_qr, _vt, _q, _tau, _r, _work, lwork, _rwork, // info, _lambda); if ((shiftedi >= 0) && (shiftedj >= 0) && (shiftedi + blksize <= M) && (shiftedj + blksize <= N)) { for (int img = 0; img < nimg; img++) { for (int bi = 0; bi < blksize; bi++) { #pragma simd for (int bj = 0; bj < blksize; bj++) { mat2[shiftedj + bj + (shiftedi + bi) * N + m * M * N + img * nmap * M * N] = buf[bj + bi * blksize + img * blksize * blksize]; } } } } else { for (int img = 0; img < nimg; img++) { for (int bi = 0; bi < blksize; bi++) { for (int bj = 0; bj < blksize; bj++) { int bii = (shiftedi + bi); if(bii < 0) bii = Mpad+bii; int bjj = (shiftedj + bj); if(bjj < 0) bjj = Npad+bjj; if((bii 
>= 0) && (bjj >= 0) && (bii < M) && (bjj < N)) { mat2[bjj + (bii) * N + m * M * N + img * nmap * M * N] = buf[bj + bi * blksize + img * blksize * blksize]; } } } } } } } } free(buf); free(s); free(u); free(vt); free(work); free(rwork); } }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) { for (t4=max(max(max(0,ceild(3*t1-255,256)),ceild(24*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(12*t1+Nx+21,1024)),floord(24*t2+Nx+20,1024)),floord(32*t3+Nx+28,1024)),floord(24*t1-24*t2+Nz+Nx+19,1024));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),1024*t4+1022),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, 
tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
serial_sum.c
/* Sum all ncells entries of var in order; the reduction is SIMD-vectorized
 * but otherwise serial.  Returns 0.0 for an empty array. */
double do_sum(double* restrict var, long ncells)
{
   double total = 0.0;

#pragma omp simd reduction(+:total)
   for (long idx = 0; idx < ncells; ++idx) {
      total += var[idx];
   }

   return total;
}
lu.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU This benchmark is an OpenMP C version of the NPB LU code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: S. Weeratunga V. Venkatakrishnan E. Barszcz M. Yarrow OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" /* global variables */ #include "applu.h" //#if defined(_OPENMP) /* for thread synchronization */ //static boolean flag[ISIZ1/2*2+1]; //#endif /* _OPENMP */ /* function declarations */ #include <omp.h> static void blts(int nx,int ny,int nz,int k,double omega,double v[64][65][65][5],double ldz[64][64][5][5],double ldy[64][64][5][5],double ldx[64][64][5][5],double d[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0); static void buts(int nx,int ny,int nz,int k,double omega,double v[64][65][65][5],double tv[64][64][5],double d[64][64][5][5],double udx[64][64][5][5],double udy[64][64][5][5],double udz[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0); static void domain(); static void erhs(); static void error(); static void exact(int i,int j,int k,double u000ijk[5]); static void jacld(int k); static void jacu(int k); static void l2norm(int nx0,int ny0,int nz0,int 
ist,int iend,int jst,int jend,double v[64][65][65][5],double sum[5]); static void pintgr(); static void read_input(); static void rhs(); static void setbv(); static void setcoeff(); static void setiv(); static void ssor(); static void verify(double xcr[5],double xce[5],double xci,char *class,boolean *verified); /*-------------------------------------------------------------------- program applu --------------------------------------------------------------------*/ int main(int argc,char **argv) { /*-------------------------------------------------------------------- c c driver for the performance evaluation of the solver for c five coupled parabolic/elliptic partial differential equations. c --------------------------------------------------------------------*/ char class; boolean verified; double mflops; int nthreads = 1; /*-------------------------------------------------------------------- c read input data --------------------------------------------------------------------*/ read_input(); /*-------------------------------------------------------------------- c set up domain sizes --------------------------------------------------------------------*/ domain(); /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ setcoeff(); /*-------------------------------------------------------------------- c set the boundary values for dependent variables --------------------------------------------------------------------*/ setbv(); /*-------------------------------------------------------------------- c set the initial values for dependent variables --------------------------------------------------------------------*/ setiv(); /*-------------------------------------------------------------------- c compute the forcing term based on prescribed exact solution --------------------------------------------------------------------*/ erhs(); { //#if defined(_OPENMP) // 
nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ } /*-------------------------------------------------------------------- c perform the SSOR iterations --------------------------------------------------------------------*/ ssor(); /*-------------------------------------------------------------------- c compute the solution error --------------------------------------------------------------------*/ error(); /*-------------------------------------------------------------------- c compute the surface integral --------------------------------------------------------------------*/ pintgr(); /*-------------------------------------------------------------------- c verification test --------------------------------------------------------------------*/ verify(rsdnm,errnm,frc,&class,&verified); mflops = ((double )itmax) * (1984.77 * ((double )nx0) * ((double )ny0) * ((double )nz0) - 10923.3 * (((double )(nx0 + ny0 + nz0)) / 3.0 * (((double )(nx0 + ny0 + nz0)) / 3.0)) + 27770.9 * ((double )(nx0 + ny0 + nz0)) / 3.0 - 144010.0) / (maxtime * 1000000.0); c_print_results("LU",class,nx0,ny0,nz0,itmax,nthreads,maxtime,mflops," floating point",verified,"3.0 structured","14 Jan 2020","(none)","(none)","-lm","(none)","(none)","(none)","(none)"); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void blts(int nx,int ny,int nz,int k,double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5],double ldz[64][64][5][5],double ldy[64][64][5][5],double ldx[64][64][5][5],double d[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]); } } } for (i = ist; i <= iend; i += 1) { //#if defined(_OPENMP) // if (i != ist) { // while (flag[i-1] == 0) { // ; // } // } // if (i != iend) { // while (flag[i] == 1) { // ; // } // } //#endif /* _OPENMP */ for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]); } 
/*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * 
tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } //#if defined(_OPENMP) // if (i != ist) flag[i-1] = 0; // if (i != iend) flag[i] = 1; //#endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx,int ny,int nz,int k,double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5],double tv[64][64][5],double d[64][64][5][5],double udx[64][64][5][5],double udy[64][64][5][5],double udz[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = iend; i >= ist; i += -1) { #pragma omp parallel for private (j,m) for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]); } } } for (i = iend; i >= ist; i += -1) { //#if defined(_OPENMP) // if (i != iend) { // while (flag[i+1] == 0) { // ; // } //// } // if (i != ist) { // while (flag[i] == 1) { // ; // } // } //#endif /* _OPENMP */ for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]); } 
/*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = 
tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; tv[i][j][3] = tv[i][j][3] / tmat[3][3]; tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; tv[i][j][2] = tv[i][j][2] / tmat[2][2]; tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; tv[i][j][1] = tv[i][j][1] / tmat[1][1]; tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; tv[i][j][0] = tv[i][j][0] / tmat[0][0]; v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } //#if defined(_OPENMP) // if (i != iend) flag[i+1] = 0; // if (i != ist) flag[i] = 1; //#endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void domain() { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ nx = nx0; ny = ny0; nz = nz0; 
/*-------------------------------------------------------------------- c check the sub-domain size --------------------------------------------------------------------*/ if (nx < 4 || ny < 4 || nz < 4) { printf(" SUBDOMAIN SIZE IS TOO SMALL - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n TO 4 THEY ARE CURRENTLY%3d%3d%3d\n",nx,ny,nz); exit(1); } if (nx > 64 || ny > 64 || nz > 64) { printf(" SUBDOMAIN SIZE IS TOO LARGE - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n CURRENTLY%4d%4d%4d\n",nx,ny,nz); exit(1); } /*-------------------------------------------------------------------- c set up the start and end in i and j extents for all processors --------------------------------------------------------------------*/ ist = 1; iend = nx - 2; jst = 1; jend = ny - 2; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void erhs() { { /*-------------------------------------------------------------------- c c compute the right hand side based on exact solution c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double dsspm; double xi; double eta; double zeta; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double 
u51km1; dsspm = dssp; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = 0.0; } } } } #pragma omp parallel for private (iglob,jglob,xi,eta,zeta,i,j,k,m) firstprivate (nx0) for (i = 0; i <= nx - 1; i += 1) { iglob = i; xi = ((double )iglob) / (nx0 - 1); #pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0) for (j = 0; j <= ny - 1; j += 1) { jglob = j; eta = ((double )jglob) / (ny0 - 1); #pragma omp parallel for private (zeta,k,m) for (k = 0; k <= nz - 1; k += 1) { zeta = ((double )k) / (nz - 1); #pragma omp parallel for private (m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 1 - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][2] = 
rsd[i][j][k][2] * u21; flux[i][j][k][3] = rsd[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i - 1][j][k][0]; u21im1 = tmp * rsd[i - 1][j][k][1]; u31im1 = tmp * rsd[i - 1][j][k][2]; u41im1 = tmp * rsd[i - 1][j][k][3]; u51im1 = tmp * rsd[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); frct[i][j][k][2] = 
frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = 
ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { //firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j - 1][k][0]; u21jm1 = tmp * rsd[i][j - 1][k][1]; u31jm1 = tmp * rsd[i][j - 1][k][2]; u41jm1 = tmp * rsd[i][j - 1][k][3]; u51jm1 = tmp * rsd[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j 
* u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 
* rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp 
= 1.0 / rsd[i][j][k - 1][0]; u21km1 = tmp * rsd[i][j][k - 1][1]; u31km1 = tmp * rsd[i][j][k - 1][2]; u41km1 = tmp * rsd[i][j][k - 1][3]; u51km1 = tmp * rsd[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 
* rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
}

/* Interior fourth-order dissipation stencil in the zeta direction. */
#pragma omp parallel for private (k,m)
for (k = 3; k <= nz - 4; k += 1) {
#pragma omp parallel for private (m)
for (m = 0; m <= 4; m += 1) {
frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
}
}

/* One-sided dissipation stencils at the far zeta boundary (k = nz-3, nz-2). */
#pragma omp parallel for private (m) firstprivate (dsspm)
for (m = 0; m <= 4; m += 1) {
frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
}
}
}
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error() {
/*--------------------------------------------------------------------
c
c   compute the solution error:
c   errnm[m] = RMS norm of (exact solution - u) over the interior
c   of the sub-domain, one component per flow variable m.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
int k;
int m;
int iglob;
int jglob;
double tmp;
double u000ijk[5];   /* exact solution at a single grid point */

#pragma omp parallel for private (m)
for (m = 0; m <= 4; m += 1) {
errnm[m] = 0.0;
}

/* Accumulate squared pointwise errors over the interior nodes.       */
/* NOTE(review): the inner pragma spawns a parallel region over only  */
/* 5 iterations per grid point -- almost certainly overhead; each m   */
/* writes a distinct errnm[m], so it is race-free. Left as-is.        */
for (i = ist; i <= iend; i += 1) {
iglob = i;
for (j = jst; j <= jend; j += 1) {
jglob = j;
for (k = 1; k <= nz - 2; k += 1) {
exact(iglob,jglob,k,u000ijk);
#pragma omp parallel for private (tmp,m)
for (m = 0; m <= 4; m += 1) {
tmp = u000ijk[m] - u[i][j][k][m];
errnm[m] = errnm[m] + tmp * tmp;
}
}
}
}

/* Normalize by the number of interior points and take the square root. */
for (m = 0; m <= 4; m += 1) {
errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static
void exact(int i,int j,int k,double u000ijk[5]) {
/*--------------------------------------------------------------------
c
c   compute the exact solution at grid point (i,j,k)
c
c   Each of the 5 flow variables is a polynomial (up to fourth order)
c   in the normalized coordinates (xi, eta, zeta), with coefficients
c   taken from the global table ce[5][13]. The result is written into
c   the caller-supplied array u000ijk[5].
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int m;
double xi;
double eta;
double zeta;

/* Normalized coordinates: xi and eta use the global extents nx0/ny0,   */
/* while zeta uses nz -- consistent with the initialization in erhs.    */
xi = ((double )i) / (nx0 - 1);
eta = ((double )j) / (ny0 - 1);
zeta = ((double )k) / (nz - 1);

#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)
for (m = 0; m <= 4; m += 1) {
u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* Build, for the z-plane k, the 5x5 block-diagonal matrices d and the
   block sub-diagonal matrices a, b, c of the Jacobian used by the
   lower-triangular sweep of the implicit solve. */
static void jacld(int k) {
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
double r43;      /* 4/3 viscous-stress factor */
double c1345;    /* product of flow constants; presumably C1*C3*C4*C5 -- verify */
double c34;      /* product of flow constants; presumably C3*C4 -- verify */
double tmp1;     /* 1/rho and its powers, reused per stencil point */
double tmp2;
double tmp3;
r43 = 4.0 / 3.0;
c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
c34 = 1.00e-01 * 1.00e+00;
#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)
for (i = ist; i <= iend; i += 1) {
#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)
for (j = jst; j <= jend;
j += 1) { /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - 
(c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k - 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = -dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]); a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; a[i][j][1][2] = 0.0; a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1); a[i][j][1][4] = 0.0; a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]); a[i][j][2][1] = 0.0; a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1); a[i][j][2][4] = 0.0; a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k 
- 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]); a[i][j][3][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * tmp1)); a[i][j][3][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * tmp1)); a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; a[i][j][3][4] = -dt * tz2 * 0.40e+00; a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]); a[i][j][4][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1]; a[i][j][4][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2]; a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3]; a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j - 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = -dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = -dt * ty2 * 
(-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]); b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]); b[i][j][2][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * tmp1)); b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][3] * tmp1)); b[i][j][2][4] = -dt * ty2 * 0.40e+00; b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1); b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]); b[i][j][4][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1]; b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 
1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2]; b[i][j][4][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3]; b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i - 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tx1 * dx1; c[i][j][0][1] = -dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]); c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; c[i][j][1][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * tmp1)); c[i][j][1][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * tmp1)); c[i][j][1][4] = -dt * tx2 * 0.40e+00; c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]); c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1); c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; c[i][j][2][3] = 0.0; c[i][j][2][4] = 0.0; c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]); c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1); c[i][j][3][2] = 0.0; c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; 
c[i][j][3][4] = 0.0; c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]); c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1]; c[i][j][4][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2]; c[i][j][4][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3]; c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacu(int k) { /*-------------------------------------------------------------------- c compute the upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; double r43; double c1345; double c34; double tmp1; double tmp2; double tmp3; r43 = 4.0 / 3.0; c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00; c34 = 1.00e-01 * 1.00e+00; //#if defined(_OPENMP) #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) 
firstprivate (ist,jst,jend) for (i = iend; i >= ist; i += -1) { #pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt) for (j = jend; j >= jst; j += -1) { /*#else for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #endif*/ /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * 
(u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i + 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]); a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * 
tmp1)); a[i][j][1][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * tmp1)); a[i][j][1][4] = dt * tx2 * 0.40e+00; a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]); a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1); a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]); a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i + 1][j][k][1] * u[i + 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][2] * u[i + 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][3] * u[i + 1][j][k][3]) - c1345 * tmp2 * u[i + 1][j][k][4]); a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1]; a[i][j][4][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2]; a[i][j][4][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3]; a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c 
form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j + 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]); b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]); b[i][j][2][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * tmp1)); b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][3] * tmp1)); b[i][j][2][4] = dt * ty2 * 0.40e+00; b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1); b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j + 1][k][1] * u[i][j + 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j + 1][k][2] * u[i][j + 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j + 1][k][3] * u[i][j + 1][k][3]) - c1345 * tmp2 * 
u[i][j + 1][k][4]); b[i][j][4][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1]; b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2]; b[i][j][4][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3]; b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k + 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]); c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 
1][3]); c[i][j][3][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * tmp1)); c[i][j][3][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * tmp1)); c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * 0.40e+00; c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k + 1][1] * u[i][j][k + 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k + 1][2] * u[i][j][k + 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k + 1][3] * u[i][j][k + 1][3]) - c1345 * tmp2 * u[i][j][k + 1][4]); c[i][j][4][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1]; c[i][j][4][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2]; c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3]; c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void l2norm(int nx0,int ny0,int nz0,int ist,int iend,int jst,int jend, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5],double sum[5]) { { /*-------------------------------------------------------------------- c to compute the l2-norm of vector v. --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; double sum0 = 0.0; double sum1 = 0.0; double sum2 = 0.0; double sum3 = 0.0; double sum4 = 0.0; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { sum[m] = 0.0; } #pragma omp parallel for private (i,j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4) for (k = 1; k <= nz0 - 2; k += 1) { sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0]; sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1]; sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2]; sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3]; sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4]; } } } { sum[0] += sum0; sum[1] += sum1; sum[2] += sum2; sum[3] += sum3; sum[4] += sum4; } for (m = 0; m <= 4; m += 1) { sum[m] = sqrt(sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2))); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void pintgr() { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int ibeg; int ifin; int ifin1; int jbeg; int jfin; int jfin1; int iglob; int iglob1; int iglob2; int jglob; int jglob1; int jglob2; /* phi1(0:isiz2+1,0:isiz3+1) */ double phi1[66][66]; /* 
phi2(0:isiz2+1,0:isiz3+1) */
  double phi2[66][66];
  double frc1;
  double frc2;
  double frc3;
/*--------------------------------------------------------------------
c   set up the sub-domains for integration in each processor
c   (ii1/ii2, ji1/ji2, ki1/ki2 are the global bounds of the
c   integration surface -- set elsewhere in this file; presumably a
c   leftover of the MPI decomposition, since this version is a single
c   domain -- TODO confirm against the globals' initialization)
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = - 1;
  iglob2 = nx - 1;
  if (iglob1 >= ii1 && iglob2 < ii2 + nx) 
    ibeg = 0;
  if (iglob1 >= ii1 - nx && iglob2 <= ii2) 
    ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) 
    ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) 
    ifin = ii2;
  /* NOTE(review): the j-setup below is asymmetric with the i-setup
     above (jfin starts at -1 vs ifin at 0, and the second test uses
     ">" where the i version uses ">=").  Kept as-is; verify against
     the reference NPB LU source before changing. */
  jbeg = ny;
  jfin = - 1;
  jglob1 = 0;
  jglob2 = ny - 1;
  if (jglob1 >= ji1 && jglob2 < ji2 + ny) 
    jbeg = 0;
  if (jglob1 > ji1 - ny && jglob2 <= ji2) 
    jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) 
    jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) 
    jfin = ji2;
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) 
    ifin1 = ifin - 1;
  if (jfin1 == ji2) 
    jfin1 = jfin - 1;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private (i,k)
  for (i = 0; i <= 65; i += 1) {
#pragma omp parallel for private (k)
    for (k = 0; k <= 65; k += 1) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
  /* pressure-like quantity 0.4*(E - 0.5*rho*|v|^2/rho) sampled on the
     two k = ki1 and k = ki2 planes */
#pragma omp parallel for private (k,iglob,jglob,i,j)
  for (i = ibeg; i <= ifin; i += 1) {
    iglob = i;  /* iglob/jglob are dead stores here (single domain) */
#pragma omp parallel for private (k,jglob,j)
    for (j = jbeg; j <= jfin; j += 1) {
      jglob = j;
      k = ki1;
      phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);
      k = ki2;
      phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);
    }
  }
  /* trapezoidal-rule surface integral over the two k planes */
  frc1 = 0.0;
#pragma omp parallel for private (i,j) reduction (+:frc1)
  for (i = ibeg; i <= ifin1; i += 1) {
#pragma omp parallel for private (j) reduction (+:frc1)
    for (j = jbeg; j <= jfin1; j += 1) {
      frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);
    }
  }
  frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private (i,k)
  for (i = 0; i <= 65; i += 1) {
#pragma omp parallel for private (k)
    for (k = 0; k <= 65; k += 1) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
  /* same quantity on the j = jbeg and j = jfin planes (i-k surface) */
  jglob = jbeg;
  if (jglob == ji1) {
#pragma omp parallel for private (iglob,i,k)
    for (i = ibeg; i <= ifin; i += 1) {
      iglob = i;
#pragma omp parallel for private (k)
      for (k = ki1; k <= ki2; k += 1) {
        phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]);
      }
    }
  }
  jglob = jfin;
  if (jglob == ji2) {
#pragma omp parallel for private (iglob,i,k)
    for (i = ibeg; i <= ifin; i += 1) {
      iglob = i;
#pragma omp parallel for private (k)
      for (k = ki1; k <= ki2; k += 1) {
        phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]);
      }
    }
  }
  frc2 = 0.0;
#pragma omp parallel for private (i,k) reduction (+:frc2) firstprivate (ifin1)
  for (i = ibeg; i <= ifin1; i += 1) {
#pragma omp parallel for private (k) reduction (+:frc2)
    for (k = ki1; k <= ki2 - 1; k += 1) {
      frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);
    }
  }
  frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private (i,k)
  for (i = 0; i <= 65; i += 1) {
#pragma omp parallel for private (k)
    for (k = 0; k <= 65; k += 1) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
  iglob = 
ibeg; if (iglob == ii1) { #pragma omp parallel for private (jglob,j,k) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ibeg) for (k = ki1; k <= ki2; k += 1) { phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]); } } } iglob = ifin; if (iglob == ii2) { #pragma omp parallel for private (jglob,j,k) firstprivate (jfin) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ifin) for (k = ki1; k <= ki2; k += 1) { phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]); } } } frc3 = 0.0; #pragma omp parallel for private (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2) for (j = jbeg; j <= jfin1; j += 1) { #pragma omp parallel for private (k) reduction (+:frc3) for (k = ki1; k <= ki2 - 1; k += 1) { frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } frc3 = deta * dzeta * frc3; frc = 0.25 * (frc1 + frc2 + frc3); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void read_input() { FILE *fp; /*-------------------------------------------------------------------- c if input file does not exist, it uses defaults c ipr = 1 for detailed progress output c inorm = how often the norm is printed (once every inorm iterations) c itmax = number of pseudo time steps c dt = time step c omega 1 over-relaxation factor for SSOR c tolrsd = steady state residual tolerance levels c nx, ny, nz = number of grid points in x, y, z directions --------------------------------------------------------------------*/ printf("\n\n NAS Parallel 
Benchmarks 3.0 structured OpenMP C version - LU Benchmark\n\n"); fp = fopen("inputlu.data","r"); if (fp != ((void *)0)) { printf(" Reading from input file inputlu.data\n"); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d%d",&ipr,&inorm); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d",&itmax); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf",&dt); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf",&omega); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf%lf%lf%lf%lf",&tolrsd[0],&tolrsd[1],&tolrsd[2],&tolrsd[3],&tolrsd[4]); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d%d%d",&nx0,&ny0,&nz0); while(fgetc(fp) != '\n') ; fclose(fp); } else { ipr = 1; inorm = 250; itmax = 250; dt = 2.0; omega = 1.2; tolrsd[0] = 1.0e-8; tolrsd[1] = 1.0e-8; tolrsd[2] = 1.0e-8; tolrsd[3] = 1.0e-8; tolrsd[4] = 1.0e-8; nx0 = 64; ny0 = 64; nz0 = 64; } /*-------------------------------------------------------------------- c check problem size --------------------------------------------------------------------*/ if (nx0 < 4 || ny0 < 4 || nz0 < 4) { printf(" PROBLEM SIZE IS TOO SMALL - \n SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); exit(1); } if (nx0 > 64 || ny0 > 64 || nz0 > 64) { printf(" PROBLEM SIZE IS TOO LARGE - \n NX, NY AND NZ SHOULD BE EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); exit(1); } printf(" Size: %3dx%3dx%3d\n",nx0,ny0,nz0); printf(" Iterations: %3d\n",itmax); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs() { { /*-------------------------------------------------------------------- c compute the right hand sides --------------------------------------------------------------------*/ 
/*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][1]; u21 = u[i][j][k][1] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][2] = u[i][j][k][2] * u21; flux[i][j][k][3] = u[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m) for (j = jst; j <= jend; j += 1) { 
#pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } L2 = nx - 1; #pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / u[i][j][k][0]; u21i = tmp * u[i][j][k][1]; u31i = tmp * u[i][j][k][2]; u41i = tmp * u[i][j][k][3]; u51i = tmp * u[i][j][k][4]; tmp = 1.0 / u[i - 1][j][k][0]; u21im1 = tmp * u[i - 1][j][k][1]; u31im1 = tmp * u[i - 1][j][k][2]; u41im1 = tmp * u[i - 1][j][k][3]; u51im1 = tmp * u[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 
1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * 
u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } L2 = ny - 1; #pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j - 1][k][0]; u21jm1 = tmp * u[i][j - 1][k][1]; u31jm1 = tmp * u[i][j - 1][k][2]; u41jm1 = tmp * u[i][j - 1][k][3]; u51jm1 = tmp * u[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); rsd[i][j][k][1] = 
rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c 
zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k - 1][0]; u21km1 = tmp * u[i][j][k - 1][1]; u31km1 = tmp * u[i][j][k - 1][2]; u41km1 = tmp * u[i][j][k - 1][3]; u51km1 = tmp * u[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } 
#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } #pragma omp parallel for private (k,m) for (k = 3; k <= nz - 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); rsd[i][j][nz - 2][m] = 
rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setbv() { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int iglob; int jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (j = 0; j <= ny - 1; j += 1) { jglob = j; exact(iglob,jglob,0,&u[i][j][0][0]); exact(iglob,jglob,nz - 1,&u[i][j][nz - 1][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,0,k,&u[i][0][k][0]); } } for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,ny0 - 1,k,&u[i][ny - 1][k][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(0,jglob,k,&u[0][j][k][0]); } } for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(nx0 - 1,jglob,k,&u[nx - 1][j][k][0]); } } } } 
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setcoeff() { /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ dxi = 1.0 / (nx0 - 1); deta = 1.0 / (ny0 - 1); dzeta = 1.0 / (nz0 - 1); tx1 = 1.0 / (dxi * dxi); tx2 = 1.0 / (2.0 * dxi); tx3 = 1.0 / dxi; ty1 = 1.0 / (deta * deta); ty2 = 1.0 / (2.0 * deta); ty3 = 1.0 / deta; tz1 = 1.0 / (dzeta * dzeta); tz2 = 1.0 / (2.0 * dzeta); tz3 = 1.0 / dzeta; ii1 = 1; ii2 = nx0 - 2; ji1 = 1; ji2 = ny0 - 3; ki1 = 2; ki2 = nz0 - 2; /*-------------------------------------------------------------------- c diffusion coefficients --------------------------------------------------------------------*/ dx1 = 0.75; dx2 = dx1; dx3 = dx1; dx4 = dx1; dx5 = dx1; dy1 = 0.75; dy2 = dy1; dy3 = dy1; dy4 = dy1; dy5 = dy1; dz1 = 1.00; dz2 = dz1; dz3 = dz1; dz4 = dz1; dz5 = dz1; /*-------------------------------------------------------------------- c fourth difference dissipation --------------------------------------------------------------------*/ dssp = ((dx1 > ((dy1 > dz1?dy1 : dz1))?dx1 : ((dy1 > dz1?dy1 : dz1)))) / 4.0; /*-------------------------------------------------------------------- c coefficients of the exact solution to the first pde --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 5.0e-01; ce[0][7] = 2.0e-02; ce[0][8] = 1.0e-02; ce[0][9] = 3.0e-02; ce[0][10] = 5.0e-01; ce[0][11] = 4.0e-01; ce[0][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the second pde --------------------------------------------------------------------*/ ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 
2.0; ce[1][6] = 3.0; ce[1][7] = 1.0e-02; ce[1][8] = 3.0e-02; ce[1][9] = 2.0e-02; ce[1][10] = 4.0e-01; ce[1][11] = 3.0e-01; ce[1][12] = 5.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the third pde --------------------------------------------------------------------*/ ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 4.0e-02; ce[2][8] = 3.0e-02; ce[2][9] = 5.0e-02; ce[2][10] = 3.0e-01; ce[2][11] = 5.0e-01; ce[2][12] = 4.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fourth pde --------------------------------------------------------------------*/ ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 3.0e-02; ce[3][8] = 5.0e-02; ce[3][9] = 4.0e-02; ce[3][10] = 2.0e-01; ce[3][11] = 1.0e-01; ce[3][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fifth pde --------------------------------------------------------------------*/ ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 1.0e-01; ce[4][5] = 4.0e-01; ce[4][6] = 3.0e-01; ce[4][7] = 5.0e-02; ce[4][8] = 4.0e-02; ce[4][9] = 3.0e-02; ce[4][10] = 1.0e-01; ce[4][11] = 3.0e-01; ce[4][12] = 2.0e-01; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setiv() { { /*-------------------------------------------------------------------- c c set the initial values of independent variables based on tri-linear c interpolation of boundary values in the computational space. 
c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; double xi; double eta; double zeta; double pxi; double peta; double pzeta; double ue_1jk[5]; double ue_nx0jk[5]; double ue_i1k[5]; double ue_iny0k[5]; double ue_ij1[5]; double ue_ijnz[5]; for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 1; k <= nz - 1 - 1; k += 1) { zeta = ((double )k) / (nz - 1); if (jglob != 0 && jglob != ny0 - 1) { eta = ((double )jglob) / (ny0 - 1); for (i = 0; i <= nx - 1; i += 1) { iglob = i; if (iglob != 0 && iglob != nx0 - 1) { xi = ((double )iglob) / (nx0 - 1); exact(0,jglob,k,ue_1jk); exact(nx0 - 1,jglob,k,ue_nx0jk); exact(iglob,0,k,ue_i1k); exact(iglob,ny0 - 1,k,ue_iny0k); exact(iglob,jglob,0,ue_ij1); exact(iglob,jglob,nz - 1,ue_ijnz); #pragma omp parallel for private (pxi,peta,pzeta,m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void ssor() { /*-------------------------------------------------------------------- c to perform pseudo-time stepping SSOR iterations c for five nonlinear pde s. 
--------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int istep; double tmp; double delunm[5]; double tv[64][64][5]; /*-------------------------------------------------------------------- c begin pseudo-time stepping iterations --------------------------------------------------------------------*/ tmp = 1.0 / (omega * (2.0 - omega)); /*-------------------------------------------------------------------- c initialize a,b,c,d to zero (guarantees that page tables have been c formed, if applicable on given architecture, before timestepping). --------------------------------------------------------------------*/ { #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= 63; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= 63; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { a[i][j][k][m] = 0.0; b[i][j][k][m] = 0.0; c[i][j][k][m] = 0.0; d[i][j][k][m] = 0.0; } } } } } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c the timestep loop --------------------------------------------------------------------*/ for (istep = 1; istep <= itmax; istep += 1) { if (istep % 20 == 0 || istep == itmax || istep == 1) { printf(" Time step %4d\n",istep); } { 
/*-------------------------------------------------------------------- c perform SSOR iteration --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (dt) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } for (k = 1; k <= nz - 2; k += 1) { /*-------------------------------------------------------------------- c form the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacld(k); /*-------------------------------------------------------------------- c perform the lower triangular solution --------------------------------------------------------------------*/ blts(nx,ny,nz,k,omega,rsd,a,b,c,d,ist,iend,jst,jend,nx0,ny0); } for (k = nz - 2; k >= 1; k += -1) { /*-------------------------------------------------------------------- c form the strictly upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacu(k); /*-------------------------------------------------------------------- c perform the upper triangular solution --------------------------------------------------------------------*/ buts(nx,ny,nz,k,omega,rsd,tv,d,a,b,c,ist,iend,jst,jend,nx0,ny0); } /*-------------------------------------------------------------------- c update the variables --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tmp) for (m = 0; m 
<= 4; m += 1) { u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* end parallel */ } /*-------------------------------------------------------------------- c compute the max-norms of newton iteration corrections --------------------------------------------------------------------*/ if (istep % inorm == 0) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,delunm); } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the max-norms of newton iteration residuals --------------------------------------------------------------------*/ if (istep % inorm == 0 || istep == itmax) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); } /*-------------------------------------------------------------------- c check the newton-iteration residuals against the tolerance levels --------------------------------------------------------------------*/ if (rsdnm[0] < tolrsd[0] && rsdnm[1] < tolrsd[1] && rsdnm[2] < tolrsd[2] && rsdnm[3] < tolrsd[3] && rsdnm[4] < tolrsd[4]) { exit(1); } } timer_stop(1); maxtime = timer_read(1); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(double xcr[5],double xce[5],double xci,char *class,boolean *verified) { /*-------------------------------------------------------------------- c verification routine --------------------------------------------------------------------*/ double xcrref[5]; double xceref[5]; double xciref; double xcrdif[5]; double xcedif[5]; double xcidif; double epsilon; double dtref; int m; /*-------------------------------------------------------------------- c tolerance level --------------------------------------------------------------------*/ epsilon = 1.0e-08; *class = 'U'; *verified = 1; #pragma omp 
parallel for private (m) for (m = 0; m <= 4; m += 1) { xcrref[m] = 1.0; xceref[m] = 1.0; } xciref = 1.0; if (nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) { *class = 'S'; dtref = 5.0e-1; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xcrref[0] = 1.6196343210976702e-02; xcrref[1] = 2.1976745164821318e-03; xcrref[2] = 1.5179927653399185e-03; xcrref[3] = 1.5029584435994323e-03; xcrref[4] = 3.4264073155896461e-02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xceref[0] = 6.4223319957960924e-04; xceref[1] = 8.4144342047347926e-05; xceref[2] = 5.8588269616485186e-05; xceref[3] = 5.8474222595157350e-05; xceref[4] = 1.3103347914111294e-03; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xciref = 7.8418928865937083; } else if (nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) { /* SPEC95fp size */ *class = 'W'; dtref = 1.5e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (33x33x33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xcrref[0] = 0.1236511638192e+02; xcrref[1] = 0.1317228477799e+01; xcrref[2] = 0.2550120713095e+01; xcrref[3] = 0.2326187750252e+01; xcrref[4] = 0.2826799444189e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, 
for the (33X33X33) grid, --------------------------------------------------------------------*/ xceref[0] = 0.4867877144216; xceref[1] = 0.5064652880982e-01; xceref[2] = 0.9281818101960e-01; xceref[3] = 0.8570126542733e-01; xceref[4] = 0.1084277417792e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (33X33X33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xciref = 0.1161399311023e+02; } else if (nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) { *class = 'A'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 7.7902107606689367e+02; xcrref[1] = 6.3402765259692870e+01; xcrref[2] = 1.9499249727292479e+02; xcrref[3] = 1.7845301160418537e+02; xcrref[4] = 1.8384760349464247e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.9964085685471943e+01; xceref[1] = 2.8194576365003349; xceref[2] = 7.3473412698774742; xceref[3] = 6.7139225687777051; xceref[4] = 7.0715315688392578e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 2.6030925604886277e+01; } else if (nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) { *class = 'B'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the 
(102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 3.5532672969982736e+03; xcrref[1] = 2.6214750795310692e+02; xcrref[2] = 8.8333721850952190e+02; xcrref[3] = 7.7812774739425265e+02; xcrref[4] = 7.3087969592545314e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (102X102X102) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 1.1401176380212709e+02; xceref[1] = 8.1098963655421574; xceref[2] = 2.8480597317698308e+01; xceref[3] = 2.5905394567832939e+01; xceref[4] = 2.6054907504857413e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 4.7887162703308227e+01; } else if (nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) { *class = 'C'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 1.03766980323537846e+04; xcrref[1] = 8.92212458801008552e+02; xcrref[2] = 2.56238814582660871e+03; xcrref[3] = 2.19194343857831427e+03; xcrref[4] = 1.78078057261061185e+04; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (162X162X162) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.15986399716949279e+02; xceref[1] = 1.55789559239863600e+01; xceref[2] = 5.41318863077207766e+01; xceref[3] = 4.82262643154045421e+01; 
xceref[4] = 4.55902910043250358e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 6.66404553572181300e+01; } else { *verified = 0; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]); xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]); } xcidif = fabs((xci - xciref) / xciref); /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. 
--------------------------------------------------------------------*/ if (( *class) != 'U') { printf("\n Verification being performed for class %1c\n",( *class)); printf(" Accuracy setting for epsilon = %20.13e\n",epsilon); if (fabs(dt - dtref) > epsilon) { *verified = 0; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n",dtref); } } else { printf(" Unknown class\n"); } if (( *class) != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m <= 4; m += 1) { if (( *class) == 'U') { printf(" %2d %20.13e\n",m,xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",m,xcr[m],xcrref[m],xcrdif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n",m,xcr[m],xcrref[m],xcrdif[m]); } } if (( *class) != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m <= 4; m += 1) { if (( *class) == 'U') { printf(" %2d %20.13e\n",m,xce[m]); } else if (xcedif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",m,xce[m],xceref[m],xcedif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n",m,xce[m],xceref[m],xcedif[m]); } } if (( *class) != 'U') { printf(" Comparison of surface integral\n"); } else { printf(" Surface integral\n"); } if (( *class) == 'U') { printf(" %20.13e\n",xci); } else if (xcidif > epsilon) { *verified = 0; printf(" FAILURE: %20.13e%20.13e%20.13e\n",xci,xciref,xcidif); } else { printf(" %20.13e%20.13e%20.13e\n",xci,xciref,xcidif); } if (( *class) == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if ( *verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } }
/* ==================== nautil.c ==================== */
/***************************************************************************** * * * Auxiliary source file for version 2.2 of nauty. * * * * Copyright (1984-2002) Brendan McKay. All rights reserved. * * Subject to waivers and disclaimers in nauty.h. * * * * CHANGE HISTORY * * 10-Nov-87 : final changes for version 1.2 * * 5-Dec-87 : renamed to version 1.3 (no changes to this file) * * 28-Sep-88 : renamed to version 1.4 (no changes to this file) * * 23-Mar-89 : changes for version 1.5 : * * - added procedure refine1() * * - changed type of ptn from int* to nvector* in fmptn() * * - declared level in breakout() * * - changed char[] to char* in a few places * * - minor rearrangement in bestcell() * * 31-Mar-89 : - added procedure doref() * * 5-Apr-89 : - changed MAKEEMPTY uses to EMPTYSET * * 12-Apr-89 : - changed writeperm() and fmperm() to not use MARKing * * 5-May-89 : - redefined MASH to gain about 8% efficiency * * 18-Oct-90 : changes for version 1.6 : * * - improved line breaking in writeperm() * * 10-Nov-90 : - added dummy routine nautil_null() * * 27-Aug-92 : changes for version 1.7 : * * - made linelength <= 0 mean no line breaks * * 5-Jun-93 : renamed to version 1.7+ (no changes to this file) * * 18-Aug-93 : renamed to version 1.8 (no changes to this file) * * 17-Sep-93 : renamed to version 1.9 (no changes to this file) * * 29-Jun-95 : changes for version 1.10 : * * - replaced loop in nextelement() to save reference past * * end of array (thanks to Kevin Maylsiak) * * 11-Jul-96 : changes for version 2.0 : * * - added alloc_error() * * - added dynamic allocation * * 21-Oct-98 : use 077777 in place of INFINITY for CLEANUP() * * 9-Jan-00 : added nautil_check() * * 12-Feb-00 : did a little formating of the code * * 28-May-00 : added nautil_freedyn() * * 16-Aug-00 : added OLDNAUTY behaviour * * 16-Nov-00 : moved graph-specific things to naugraph.c * * use function prototypes, remove UPROC, nvector * * 22-Apr-01 : added code for compilation into Magma * * removed 
nautil_null() * * removed EXTDEFS and included labelorg * * 21-Nov-01 : use NAUTYREQUIRED in nautil_check() * * 26-Jun-02 : revised permset() to avoid fetch past the end of * * the array (thanks to Jan Kieffer) * * 17-Nov-03 : changed INFINITY to NAUTY_INFINITY * * 14-Sep-04 : extended prototypes to recursive functions * * * *****************************************************************************/ #ifdef _OPENMP #include <omp.h> #endif #define ONE_WORD_SETS #include "nauty.h" #ifdef NAUTY_IN_MAGMA #include "io.e" #endif /* macros for hash-codes: */ #define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777) /* : expression whose long value depends only on long l and int/long i. Anything goes, preferably non-commutative. */ #define CLEANUP(l) ((int)((l) % 077777)) /* : expression whose value depends on long l and is less than 077777 when converted to int then short. Anything goes. */ #if MAXM==1 #define M 1 #else #define M m #endif #if !MAXN DYNALLSTAT(permutation,workperm,workperm_sz); #else static permutation workperm[MAXN]; #endif int labelorg = 0; int findFirstOf(const set* x,unsigned len); /* aproto: header new_nauty_protos.h */ /***************************************************************************** * * * nextelement(set1,m,pos) = the position of the first element in set set1 * * which occupies a position greater than pos. If no such element exists, * * the value is -1. pos can have any value less than n, including negative * * values. * * * * GLOBALS ACCESSED: none * * * *****************************************************************************/ int nextelement(set *set1, int m, int pos) { register setword setwd; register int w, l; #if MAXM==1 if (pos < 0) setwd = set1[0]; else setwd = set1[0] & BITMASK(pos); if (setwd == 0) return -1; else return FIRSTBIT(setwd); #else if (pos < 0) { w = 0; setwd = set1[0]; } else { w = SETWD(pos); setwd = set1[w] & BITMASK(SETBT(pos)); } // original BDM code here for reference. 
for (;;) { if (setwd != 0) return TIMESWORDSIZE(w) + FIRSTBIT(setwd); if (++w == m) return -1; setwd = set1[w]; } // correct, but slow!! // if (setwd != 0) return TIMESWORDSIZE(w) + FIRSTBIT(setwd); // ++w; // l=m-w; // if (l<=0 || (w+=findFirstOf(&set1[w],l))==m) // return -1; // else // return TIMESWORDSIZE(w) + FIRSTBIT(set1[w]); // // int first_nonzero=m; // // //#ifdef _OPENMP // omp_lock_t critical_lock; // omp_init_lock(&critical_lock); //#pragma omp parallel firstprivate(w) //#endif // { // int stride=1; //#ifdef _OPENMP // w+=omp_get_thread_num(); // stride=omp_get_num_threads(); //#endif // for (++w; w<first_nonzero; w+=stride) // { // if (set1[w]) //#ifdef _OPENMP // { // // try to obtain a lock, testing exit condition // while (!omp_test_lock(&critical_lock)) // { //#pragma omp flush(first_nonzero) // if (w>=first_nonzero) // goto first_nonzero_found; //give up, we're beaten // } // if (w<first_nonzero) // first_nonzero=w; // omp_unset_lock(&critical_lock); // } //#pragma omp flush(first_nonzero) // #else // first_nonzero=w; //#endif // } // first_nonzero_found: // } // // if (first_nonzero == m) // return -1; // else // return TIMESWORDSIZE(first_nonzero) + FIRSTBIT(set1[first_nonzero]); #endif } /***************************************************************************** * * * permset(set1,set2,m,perm) defines set2 to be the set * * {perm[i] | i in set1}. 
* * * * GLOBALS ACCESSED: bit<r>,leftbit<r> * * * *****************************************************************************/ void permset(set *set1, set *set2, int m, permutation *perm) { register setword setw; register int pos,w,b; EMPTYSET(set2,m); #if MAXM==1 setw = set1[0]; while (setw != 0) { TAKEBIT(b,setw); pos = perm[b]; ADDELEMENT(set2,pos); } #else for (w = 0; w < m; ++w) { setw = set1[w]; while (setw != 0) { TAKEBIT(b,setw); pos = perm[TIMESWORDSIZE(w)+b]; ADDELEMENT(set2,pos); } } #endif } /***************************************************************************** * * * putstring(f,s) writes the nul-terminated string s to file f. * * * *****************************************************************************/ void putstring(FILE *f, char *s) { while (*s != '\0') { PUTC(*s,f); ++s; } } /***************************************************************************** * * * itos(i,s) converts the int i to a nul-terminated decimal character * * string s. The value returned is the number of characters excluding * * the nul. * * * * GLOBALS ACCESSED: NONE * * * *****************************************************************************/ int itos(int i, char *s) { register int digit,j,k; register char c; int ans; if (i < 0) { k = 0; i = -i; j = 1; s[0] = '-'; } else { k = -1; j = 0; } do { digit = i % 10; i = i / 10; s[++k] = digit + '0'; } while (i); s[k+1] = '\0'; ans = k + 1; for (; j < k; ++j, --k) { c = s[j]; s[j] = s[k]; s[k] = c; } return ans; } /***************************************************************************** * * * orbits represents a partition of {0,1,...,n-1}, by orbits[i] = the * * smallest element in the same cell as i. orbjoin(orbits,autom,n) updates * * the partition orbits to the join of its current value and the cycle * * partition of perm. The function value returned is the new number of * * cells. 
* * * * GLOBALS ACCESSED: NONE * * * *****************************************************************************/ int orbjoin(int *orbits, permutation *perm, int n) { register int i,j1,j2; for (i = 0; i < n; ++i) { j1 = orbits[i]; while (orbits[j1] != j1) j1 = orbits[j1]; j2 = orbits[perm[i]]; while (orbits[j2] != j2) j2 = orbits[j2]; if (j1 < j2) orbits[j2] = j1; else if (j1 > j2) orbits[j1] = j2; } j1 = 0; for (i = 0; i < n; ++i) if ((orbits[i] = orbits[orbits[i]]) == i) ++j1; return j1; } /***************************************************************************** * * * writeperm(f,perm,cartesian,linelength,n) writes the permutation perm to * * the file f. The cartesian representation (i.e. perm itself) is used if * * cartesian != FALSE; otherwise the cyclic representation is used. No * * more than linelength characters (not counting '\n') are written on each * * line, unless linelength is ridiculously small. linelength<=0 causes no * * line breaks at all to be made. The global int labelorg is added to each * * vertex number. * * * * GLOBALS ACCESSED: itos(),putstring() * * * *****************************************************************************/ void writeperm(FILE *f, permutation *perm, boolean cartesian, int linelength, int n) { register int i,k,l,curlen,intlen; char s[30]; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"writeperm"); #endif /* CONDNL(x) writes end-of-line and 3 spaces if x characters won't fit on the current line. 
*/ #define CONDNL(x) if (linelength>0 && curlen+(x)>linelength)\ {putstring(f,"\n ");curlen=3;} curlen = 0; if (cartesian) { for (i = 0; i < n; ++i) { intlen = itos(perm[i]+labelorg,s); CONDNL(intlen+1); PUTC(' ',f); putstring(f,s); curlen += intlen + 1; } PUTC('\n',f); } else { for (i = n; --i >= 0;) workperm[i] = 0; for (i = 0; i < n; ++i) { if (workperm[i] == 0 && perm[i] != i) { l = i; intlen = itos(l+labelorg,s); if (curlen > 3) CONDNL(2*intlen+4); PUTC('(',f); do { putstring(f,s); curlen += intlen + 1; k = l; l = perm[l]; workperm[k] = 1; if (l != i) { intlen = itos(l+labelorg,s); CONDNL(intlen+2); PUTC(' ',f); } } while (l != i); PUTC(')',f); ++curlen; } } if (curlen == 0) putstring(f,"(1)\n"); else PUTC('\n',f); } } /***************************************************************************** * * * fmperm(perm,fix,mcr,m,n) uses perm to construct fix and mcr. fix * * contains those points are fixed by perm, while mcr contains the set of * * those points which are least in their orbits. * * * * GLOBALS ACCESSED: bit<r> * * * *****************************************************************************/ void fmperm(permutation *perm, set *fix, set *mcr, int m, int n) { register int i,k,l; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"writeperm"); #endif EMPTYSET(fix,m); EMPTYSET(mcr,m); for (i = n; --i >= 0;) workperm[i] = 0; for (i = 0; i < n; ++i) if (perm[i] == i) { ADDELEMENT(fix,i); ADDELEMENT(mcr,i); } else if (workperm[i] == 0) { l = i; do { k = l; l = perm[l]; workperm[k] = 1; } while (l != i); ADDELEMENT(mcr,i); } } /***************************************************************************** * * * fmptn(lab,ptn,level,fix,mcr,m,n) uses the partition at the specified * * level in the partition nest (lab,ptn) to make sets fix and mcr. fix * * represents the points in trivial cells of the partition, while mcr * * represents those points which are least in their cells. 
*                                                                            *
* GLOBALS ACCESSED: bit<r>                                                   *
*                                                                            *
*****************************************************************************/

/* Build fix (points in singleton cells) and mcr (least point of each cell)
   from the partition at the given level of the nest (lab,ptn). */
void
fmptn(int *lab, int *ptn, int level, set *fix, set *mcr, int m, int n)
{
    register int i,lmin;

    EMPTYSET(fix,m);
    EMPTYSET(mcr,m);

    for (i = 0; i < n; ++i)
        if (ptn[i] <= level)
        {
            /* singleton cell: lab[i] is both fixed and cell-minimal */
            ADDELEMENT(fix,lab[i]);
            ADDELEMENT(mcr,lab[i]);
        }
        else
        {
            /* scan the rest of this cell (ptn[i] > level means i and i+1
               are in the same cell) tracking the minimum label */
            lmin = lab[i];
            do
                if (lab[++i] < lmin) lmin = lab[i];
            while (ptn[i] > level);
            ADDELEMENT(mcr,lmin);
        }
}

/*****************************************************************************
*                                                                            *
*  doref(g,lab,ptn,level,numcells,qinvar,invar,active,code,refproc,          *
*        invarproc,mininvarlev,maxinvarlev,invararg,digraph,m,n)             *
*  is used to perform a refinement on the partition at the given level in    *
*  (lab,ptn).  The number of cells is *numcells both for input and output.   *
*  The input active is the active set for input to the refinement procedure  *
*  (*refproc)(), which must have the argument list of refine().              *
*  active may be arbitrarily changed.  invar is used for working storage.    *
*  First, (*refproc)() is called.  Then, if invarproc!=NULL and              *
*  |mininvarlev| <= level <= |maxinvarlev|, the routine (*invarproc)() is    *
*  used to compute a vertex-invariant which may refine the partition         *
*  further.  If it does, (*refproc)() is called again, using an active set   *
*  containing all but the first fragment of each old cell.  Unless g is a    *
*  digraph, this guarantees that the final partition is equitable.  The      *
*  arguments invararg and digraph are passed to (*invarproc)()               *
*  uninterpreted.  The output argument code is a composite of the codes      *
*  from all the calls to (*refproc)().  The output argument qinvar is set    *
*  to 0 if (*invarproc)() is not applied, 1 if it is applied but fails to    *
*  refine the partition, and 2 if it succeeds.                               *
*  See the file nautinv.c for a further discussion of vertex-invariants.     *
*  Note that the dreadnaut I command generates a call to this procedure      *
*  with level = mininvarlevel = maxinvarlevel = 0.
*                                                                            *
*****************************************************************************/

void
doref(graph *g, int *lab, int *ptn, int level, int *numcells,
      int *qinvar, permutation *invar, set *active, int *code,
      void (*refproc)(graph*,int*,int*,int,int*,permutation*,set*,int*,int,int),
      void (*invarproc)(graph*,int*,int*,int,int,int,permutation*,
                        int,boolean,int,int),
      int mininvarlev, int maxinvarlev, int invararg,
      boolean digraph, int m, int n)
{
    register int j,h;
    register permutation pw;
    int iw;
    int i,cell1,cell2,nc,tvpos,minlev,maxlev;
    long longcode;
    boolean same;

#if !MAXN
    DYNALLOC1(permutation,workperm,workperm_sz,n,"doref");
#endif

    /* tvpos = first element of active, or 0 if active is empty */
    if ((tvpos = nextelement(active,M,-1)) < 0) tvpos = 0;

    /* first refinement pass */
    (*refproc)(g,lab,ptn,level,numcells,invar,active,code,M,n);

    /* invariant is applied only within the |mininvarlev|..|maxinvarlev|
       level window and only if the partition is not already discrete */
    minlev = (mininvarlev < 0 ? -mininvarlev : mininvarlev);
    maxlev = (maxinvarlev < 0 ? -maxinvarlev : maxinvarlev);
    if (invarproc != NULL && *numcells < n
            && level >= minlev && level <= maxlev)
    {
        (*invarproc)(g,lab,ptn,level,*numcells,tvpos,invar,invararg,
                     digraph,M,n);
        EMPTYSET(active,m);
        /* workperm[i] = invariant value of the vertex at position i */
        for (i = n; --i >= 0;) workperm[i] = invar[lab[i]];
        nc = *numcells;
        for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
        {
            /* does the invariant distinguish vertices within this cell? */
            pw = workperm[cell1];
            same = TRUE;
            for (cell2 = cell1; ptn[cell2] > level; ++cell2)
                if (workperm[cell2+1] != pw) same = FALSE;

            if (same) continue;

            /* sort the cell by invariant value (lab moves with workperm) */
            j = (cell2 - cell1 + 1) / 3;
            h = 1;
            do
                h = 3 * h + 1;
            while (h < j);

            do                      /* shell sort */
            {
                for (i = cell1 + h; i <= cell2; ++i)
                {
                    iw = lab[i];
                    pw = workperm[i];
                    for (j = i; workperm[j-h] > pw; )
                    {
                        workperm[j] = workperm[j-h];
                        lab[j] = lab[j-h];
                        if ((j -= h) < cell1 + h) break;
                    }
                    workperm[j] = pw;
                    lab[j] = iw;
                }
                h /= 3;
            }
            while (h > 0);

            /* split the cell where consecutive invariant values differ;
               every fragment after the first becomes active */
            for (i = cell1 + 1; i <= cell2; ++i)
                if (workperm[i] != workperm[i-1])
                {
                    ptn[i-1] = level;
                    ++*numcells;
                    ADDELEMENT(active,i);
                }
        }

        if (*numcells > nc)
        {
            /* invariant refined the partition: refine again and fold the
               new code into the composite via MASH/CLEANUP */
            *qinvar = 2;
            longcode = *code;
            (*refproc)(g,lab,ptn,level,numcells,invar,active,code,M,n);
            longcode = MASH(longcode,*code);
            *code = CLEANUP(longcode);
        }
        else
            *qinvar = 1;
    }
    else
        *qinvar = 0;
}
/*****************************************************************************
*                                                                            *
*  targetcell(g,lab,ptn,level,numcells,tcell,tcellsize,&cellpos,tc_level,    *
*             hint,goodcell,m,n)                                             *
*  examines the partition at the specified level in the partition nest       *
*  (lab,ptn) and finds a non-trivial cell (if none, the first cell).         *
*  If hint >= 0 and there is a non-trivial cell starting at position hint    *
*  in lab, that cell is chosen.                                              *
*  Else, If level <= tc_level, *goodcell is called to choose a cell.         *
*  Else, the first non-trivial cell is chosen.                               *
*  When a cell is chosen, tcell is set to its contents, *tcellsize to its    *
*  size, and cellpos to its starting position in lab.                        *
*                                                                            *
*  GLOBALS ACCESSED: bit<r>                                                  *
*                                                                            *
*****************************************************************************/

void
targetcell(graph *g, int *lab, int *ptn, int level, int numcells,
           set *tcell, int *tcellsize, int *cellpos, int tc_level, int hint,
           int (*goodcell)(graph*,int*,int*,int,int,int,int), int m, int n)
{
    register int i,j,k;

    /* choose the starting position i of the target cell */
    if (hint >= 0 && ptn[hint] > level &&
                     (hint == 0 || ptn[hint-1] <= level))
        i = hint;                               /* hint names a valid cell */
    else if (level <= tc_level && goodcell != NULL)
        i = (*goodcell)(g,lab,ptn,level,tc_level,m,n);
    else
        for (i = 0; i < n && ptn[i] <= level; ++i) {}  /* first non-trivial */

    /* find j = last position of the cell; if the partition is discrete
       (i == n) fall back to the first (singleton) cell */
    if (i == n)
        i = j = 0;
    else
        for (j = i + 1; ptn[j] > level; ++j) {}

    *tcellsize = j - i + 1;

    EMPTYSET(tcell,m);
    for (k = i; k <= j; ++k) ADDELEMENT(tcell,lab[k]);

    *cellpos = i;
}

/*****************************************************************************
*                                                                            *
*  shortprune(set1,set2,m) ANDs the contents of set set2 into set set1.
*                                                                            *
* GLOBALS ACCESSED: NONE                                                     *
*                                                                            *
*****************************************************************************/

/* set1 &= set2, word by word over the M words of the set. */
void
shortprune(set *set1, set *set2, int m)
{
    register int i;

    for (i = 0; i < M; ++i) INTERSECT(set1[i],set2[i]);
}

/*****************************************************************************
*                                                                            *
*  breakout(lab,ptn,level,tc,tv,active,m) operates on the partition at       *
*  the specified level in the partition nest (lab,ptn).  It finds the        *
*  element tv, which is in the cell C starting at index tc in lab (it had    *
*  better be) and splits C in the two cells {tv} and C\{tv}, in that order.  *
*  It also sets the set active to contain just the element tc.               *
*                                                                            *
*  GLOBALS ACCESSED: bit<r>                                                  *
*                                                                            *
*****************************************************************************/

void
breakout(int *lab, int *ptn, int level, int tc, int tv, set *active, int m)
{
    register int i,prev,next;

    EMPTYSET(active,m);
    ADDELEMENT(active,tc);

    /* rotate the cell so tv lands at position tc: shift elements right
       from tc until the original occurrence of tv is overwritten */
    i = tc;
    prev = tv;

    do
    {
        next = lab[i];
        lab[i++] = prev;
        prev = next;
    }
    while (prev != tv);

    /* cut the partition after position tc, making {tv} a singleton cell */
    ptn[tc] = level;
}

/*****************************************************************************
*                                                                            *
*  longprune(tcell,fix,bottom,top,m) removes zero or more elements of the    *
*  set tcell.  It is assumed that addresses bottom through top-1 contain     *
*  contiguous pairs of sets (f1,m1),(f2,m2), ... .  tcell is intersected     *
*  with each mi such that fi is a subset of fix.                             *
*                                                                            *
*  GLOBALS ACCESSED: NONE                                                    *
*                                                                            *
*****************************************************************************/

void
longprune(set *tcell, set *fix, set *bottom, set *top, int m)
{
    register int i;

    while (bottom < top)
    {
        /* test whether fi (at bottom) is a subset of fix */
        for (i = 0; i < M; ++i)
            if (NOTSUBSET(fix[i],bottom[i])) break;
        bottom += M;                 /* advance to the paired mi */

        /* i == M means the subset test passed for every word */
        if (i == M)
            for (i = 0; i < M; ++i) INTERSECT(tcell[i],bottom[i]);
        bottom += M;                 /* advance to the next (f,m) pair */
    }
}

/*****************************************************************************
*                                                                            *
*  nautil_check() checks that this file is compiled compatibly with the      *
*  given parameters.   If not, call exit(1).
*                                                                            *
*****************************************************************************/

/* Verify that the caller's compile-time parameters (word size, set sizes,
   BIGNAUTY flag, version) match those this translation unit was built
   with; exit(1) with a message on any mismatch. */
void
nautil_check(int wordsize, int m, int n, int version)
{
    if (wordsize != WORDSIZE)
    {
        fprintf(ERRFILE,"Error: WORDSIZE mismatch in nautil.c\n");
        exit(1);
    }

#if MAXN
    /* static allocation: m and n must fit the compiled-in limits */
    if (m > MAXM)
    {
        fprintf(ERRFILE,"Error: MAXM inadequate in nautil.c\n");
        exit(1);
    }
    if (n > MAXN)
    {
        fprintf(ERRFILE,"Error: MAXN inadequate in nautil.c\n");
        exit(1);
    }
#endif

#ifdef BIGNAUTY
    /* bit 0 of version encodes whether the caller was built with BIGNAUTY */
    if ((version & 1) == 0)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nautil.c\n");
        exit(1);
    }
#else
    if ((version & 1) == 1)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nautil.c\n");
        exit(1);
    }
#endif

    if (version < NAUTYREQUIRED)
    {
        fprintf(ERRFILE,"Error: nautil.c version mismatch\n");
        exit(1);
    }
}

/*****************************************************************************
*                                                                            *
*  alloc_error() writes a message and exits.  Used by DYNALLOC? macros.      *
*                                                                            *
*****************************************************************************/

void
alloc_error(char *s)
{
    fprintf(ERRFILE,"Dynamic allocation failed: %s\n",s);
    exit(2);
}

/*****************************************************************************
*                                                                            *
*  nautil_freedyn() - free the dynamic memory in this module                 *
*                                                                            *
*****************************************************************************/

void
nautil_freedyn(void)
{
#if !MAXN
    /* workperm is only dynamically allocated in the !MAXN configuration */
    DYNFREE(workperm,workperm_sz);
#endif
}
kmp_csupport.c
/* * kmp_csupport.c -- kfront linkage support for OpenMP. */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// #include "omp.h" /* extern "C" declarations of user-visible routines */ #include "kmp.h" #include "kmp_i18n.h" #include "kmp_itt.h" #include "kmp_lock.h" #include "kmp_error.h" #include "kmp_stats.h" #if OMPT_SUPPORT #include "ompt-internal.h" #include "ompt-specific.h" #endif #define MAX_MESSAGE 512 /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /* flags will be used in future, e.g., to implement */ /* openmp_strict library restrictions */ /*! * @ingroup STARTUP_SHUTDOWN * @param loc in source location information * @param flags in for future use (currently ignored) * * Initialize the runtime library. This call is optional; if it is not made then * it will be implicitly called by attempts to use other library functions. * */ void __kmpc_begin(ident_t *loc, kmp_int32 flags) { // By default __kmp_ignore_mppbeg() returns TRUE. if (__kmp_ignore_mppbeg() == FALSE) { __kmp_internal_begin(); KC_TRACE( 10, ("__kmpc_begin: called\n" ) ); } } /*! * @ingroup STARTUP_SHUTDOWN * @param loc source location information * * Shutdown the runtime library. This is also optional, and even if called will not * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero. */ void __kmpc_end(ident_t *loc) { // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op. // However, this can be overridden with KMP_IGNORE_MPPEND environment variable. 
// If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end() // will unregister this root (it can cause library shut down). if (__kmp_ignore_mppend() == FALSE) { KC_TRACE( 10, ("__kmpc_end: called\n" ) ); KA_TRACE( 30, ("__kmpc_end\n" )); __kmp_internal_end_thread( -1 ); } } /*! @ingroup THREAD_STATES @param loc Source location information. @return The global thread index of the active thread. This function can be called in any context. If the runtime has ony been entered at the outermost level from a single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that which would be returned by omp_get_thread_num() in the outermost active parallel construct. (Or zero if there is no active parallel construct, since the master thread is necessarily thread zero). If multiple non-OpenMP threads all enter an OpenMP construct then this will be a unique thread identifier among all the threads created by the OpenMP runtime (but the value cannote be defined in terms of OpenMP thread ids returned by omp_get_thread_num()). */ kmp_int32 __kmpc_global_thread_num(ident_t *loc) { kmp_int32 gtid = __kmp_entry_gtid(); KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) ); return gtid; } /*! @ingroup THREAD_STATES @param loc Source location information. @return The number of threads under control of the OpenMP<sup>*</sup> runtime This function can be called in any context. It returns the total number of threads under the control of the OpenMP runtime. That is not a number that can be determined by any OpenMP standard calls, since the library may be called from more than one non-OpenMP thread, and this reflects the total over all such calls. Similarly the runtime maintains underlying threads even when they are not active (since the cost of creating and destroying OS threads is high), this call counts all such threads even if they are not waiting for work. 
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    return TCR_4(__kmp_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );
    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    // Debug builds can filter parallel regions by KMP_PAR_RANGE.  The
    // psource string has the form ";file;routine;line;...;", so the region's
    // file name lies between the first two ';' and its line number after the
    // third ';'.
    const char *semi2;
    const char *semi3;
    int line_no;

    if (__kmp_par_range == 0) {
        return TRUE;
    }
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        const char *name = semi2 - 1;
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    // FIX: semi3 may be NULL here when psource has no third ';' (the routine
    // filter above tolerates that case); dereferencing semi3 + 1 was a NULL
    // pointer dereference.  Without a line number there is nothing to filter.
    if (semi3 == NULL) {
        return TRUE;
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
    return __kmp_entry_thread() -> th.th_root -> r.r_active;
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct

Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
      global_tid, num_threads ) );

    __kmp_push_num_threads( loc, global_tid, num_threads );
}

void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
    KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

    /* the num_threads are automatically popped */
}


#if OMP_40_ENABLED

void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
    KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
      global_tid, proc_bind ) );

    __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}

#endif /* OMP_40_ENABLED */


/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined parallel construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int         gtid = __kmp_entry_gtid();

#if (KMP_STATS_ENABLED)
    // count nested vs. top-level parallel regions separately
    int inParallel = __kmpc_in_parallel(loc);
    if (inParallel)
    {
        KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL);
    }
    else
    {
        KMP_COUNT_BLOCK(OMP_PARALLEL);
    }
#endif

    // maybe to save thr_state is enough here
    {
        va_list     ap;
        va_start(   ap, microtask );

#if OMPT_SUPPORT
        // record the caller's frame so OMPT tools can unwind past the runtime
        int tid = __kmp_tid_from_gtid( gtid );
        kmp_info_t *master_th = __kmp_threads[ gtid ];
        kmp_team_t *parent_team = master_th->th.th_team;
        if (ompt_enabled) {
            parent_team->t.t_implicit_task_taskdata[tid].
                ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
        }
#endif

#if INCLUDE_SSC_MARKS
        SSC_MARK_FORKING();
#endif
        __kmp_fork_call( loc, gtid, fork_context_intel,
                argc,
#if OMPT_SUPPORT
                VOLATILE_CAST(void *) microtask,      // "unwrapped" task
#endif
                VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
                VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                // on these ABIs va_list is an array type, so pass its address
                &ap
#else
                ap
#endif
                );
#if INCLUDE_SSC_MARKS
        SSC_MARK_JOINING();
#endif
        __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
            , fork_context_intel
#endif
        );

        va_end( ap );

#if OMPT_SUPPORT
        if (ompt_enabled) {
            parent_team->t.t_implicit_task_taskdata[tid].
                ompt_task_info.frame.reenter_runtime_frame = 0;
        }
#endif
    }
}

#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct

Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
      global_tid, num_teams, num_threads ) );

    __kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined teams construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int         gtid = __kmp_entry_gtid();
    kmp_info_t *this_thr = __kmp_threads[ gtid ];
    va_list     ap;
    va_start(   ap, microtask );

    KMP_COUNT_BLOCK(OMP_TEAMS);

    // remember teams entry point and nesting level
    this_thr->th.th_teams_microtask = microtask;
    this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host

#if OMPT_SUPPORT
    // record the caller's frame so OMPT tools can unwind past the runtime
    kmp_team_t *parent_team = this_thr->th.th_team;
    int tid = __kmp_tid_from_gtid( gtid );
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    // check if __kmpc_push_num_teams called, set default number of teams otherwise
    if ( this_thr->th.th_teams_size.nteams == 0 ) {
        __kmp_push_num_teams( loc, gtid, 0, 0 );
    }
    KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);

    // fork through __kmp_teams_master, which launches the user microtask
    __kmp_fork_call( loc, gtid, fork_context_intel,
            argc,
#if OMPT_SUPPORT
            VOLATILE_CAST(void *)      microtask,           // "unwrapped" task
#endif
            VOLATILE_CAST(microtask_t) __kmp_teams_master,  // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            &ap
#else
            ap
#endif
            );
    __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
        , fork_context_intel
#endif
    );

#if OMPT_SUPPORT
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = NULL;
    }
#endif

    // clear the per-thread teams state set above
    this_thr->th.th_teams_microtask = NULL;
    this_thr->th.th_teams_level = 0;
    *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
    va_end( ap );
}
#endif /* OMP_40_ENABLED */


//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied.  I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
    return __kmp_invoke_task_func( gtid );
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param global_tid  global thread number

Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with
                                                 * kmp_fork_call since the tasks to be done are similar in each case.
                                                 */
}

/*!
@ingroup PARALLEL
@param loc  source location information
@param global_tid  global thread number

Leave a serialized parallel construct.
*/
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    kmp_internal_control_t *top;
    kmp_info_t *this_thr;
    kmp_team_t *serial_team;

    KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );

    /* skip all this code for autopar serialized loops since
       it results in unacceptable overhead */
    if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
        return;

    // Not autopar code
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    this_thr    = __kmp_threads[ global_tid ];
    serial_team = this_thr->th.th_serial_team;

#if OMP_45_ENABLED
    kmp_task_team_t *task_team = this_thr->th.th_task_team;

    // we need to wait for the proxy tasks before finishing the thread
    if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks )
        __kmp_task_team_wait(this_thr, serial_team USE_ITT_BUILD_ARG(NULL) ); // is an ITT object needed here?
#endif

    KMP_MB();
    KMP_DEBUG_ASSERT( serial_team );
    KMP_ASSERT(       serial_team -> t.t_serialized );
    KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
    KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );

    /* If necessary, pop the internal control stack values and replace the team values */
    top = serial_team -> t.t_control_stack_top;
    if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
        copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
        serial_team -> t.t_control_stack_top = top -> next;
        __kmp_free(top);
    }

    //if( serial_team -> t.t_serialized > 1 )
    serial_team -> t.t_level--;

    /* pop dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
    {
        dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
        serial_team->t.t_dispatch->th_disp_buffer =
            serial_team->t.t_dispatch->th_disp_buffer->next;
        __kmp_free( disp_buffer );
    }

    -- serial_team -> t.t_serialized;
    if ( serial_team -> t.t_serialized == 0 ) {

        /* return to the parallel section */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        // restore FP control state saved when the region was serialized
        if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
            __kmp_clear_x87_fpu_status_word();
            __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
            __kmp_load_mxcsr( &serial_team->t.t_mxcsr );
        }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

        this_thr -> th.th_team           = serial_team -> t.t_parent;
        this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;

        /* restore values cached in the thread */
        this_thr -> th.th_team_nproc     = serial_team -> t.t_parent -> t.t_nproc;          /*  JPH */
        this_thr -> th.th_team_master    = serial_team -> t.t_parent -> t.t_threads[0];     /* JPH */
        this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;

        /* TODO the below shouldn't need to be adjusted for serialized teams */
        this_thr -> th.th_dispatch       = & this_thr -> th.th_team ->
            t.t_dispatch[ serial_team -> t.t_master_tid ];

        __kmp_pop_current_task_from_thread( this_thr );

        KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
        this_thr -> th.th_current_task -> td_flags.executing = 1;

        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            // Copy the task team from the new child / old parent team to the thread.
            this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
                            global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) );
        }
    } else {
        // still nested inside another serialized region: just decrement depth
        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
                            global_tid, serial_team, serial_team -> t.t_serialized ) );
        }
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_parallel( global_tid, NULL );
}

/*!
@ingroup SYNCHRONIZATION
@param loc  source location information.

Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void
__kmpc_flush(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_flush: called\n" ) );

    /* need explicit __mf() here since use volatile instead in library */
    KMP_MB();       /* Flush all pending memory write invalidates.  */

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
        #if KMP_MIC
            // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
            // We shouldn't need it, though, since the ABI rules require that
            // * If the compiler generates NGO stores it also generates the fence
            // * If users hand-code NGO stores they should insert the fence
            // therefore no incomplete unordered stores should be visible.
        #else
            // C74404
            // This is to address non-temporal store instructions (sfence needed).
            // The clflush instruction is addressed either (mfence needed).
            // Probably the non-temporal load movntdqa instruction should also be addressed.
            // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
            if ( ! __kmp_cpuinfo.initialized ) {
                __kmp_query_cpuid( & __kmp_cpuinfo );
            }; // if
            if ( ! __kmp_cpuinfo.sse2 ) {
                // CPU cannot execute SSE2 instructions.
            } else {
                #if KMP_COMPILER_ICC
                _mm_mfence();
                #elif KMP_COMPILER_MSVC
                MemoryBarrier();
                #else
                __sync_synchronize();
                #endif // KMP_COMPILER_ICC
            }; // if
        #endif // KMP_MIC
    #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // Nothing to see here move along
    #elif KMP_ARCH_PPC64
        // Nothing needed here (we have a real MB above).
        #if KMP_OS_CNK
            // The flushing thread needs to yield here; this prevents a
            // busy-waiting thread from saturating the pipeline. flush is
            // often used in loops like this:
            //   while (!flag) {
            //     #pragma omp flush(flag)
            //   }
            // and adding the yield here is good for at least a 10x speedup
            // when running >2 threads per core (on the NAS LU benchmark).
            __kmp_yield(TRUE);
        #endif
    #else
        #error Unknown or unsupported architecture
    #endif
}

/* -------------------------------------------------------------------------- */

/* -------------------------------------------------------------------------- */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Execute a barrier.
*/
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_BARRIER);
    KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }; // if

        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

    __kmp_threads[ global_tid ]->th.th_ident = loc;
    // TODO: explicit barrier_wait_id:
    //   this function is called when 'barrier' directive is present or
    //   implicit barrier at the end of a worksharing construct.
    // 1) better to add a per-thread barrier counter to a thread data structure
    // 2) set to 0 when a new team is created
    // 4) no sync is required

    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
}

/* The BARRIER for a MASTER section is always explicit   */

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
    int status = 0;

    KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );

    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    if( KMP_MASTER_GTID( global_tid )) {
        KMP_COUNT_BLOCK(OMP_MASTER);
        KMP_PUSH_PARTITIONED_TIMER(OMP_master);
        status = 1;
    }

#if OMPT_SUPPORT && OMPT_TRACE
    if (status) {
        // notify OMPT tools that the master region begins on this thread
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
            kmp_info_t  *this_thr = __kmp_threads[ global_tid ];
            kmp_team_t  *team     = this_thr -> th.th_team;

            int  tid = __kmp_tid_from_gtid( global_tid );
            ompt_callbacks.ompt_callback(ompt_event_master_begin)(
                team->t.ompt_team_info.parallel_id,
                team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
        }
    }
#endif

    if ( __kmp_env_consistency_check ) {
#if KMP_USE_DYNAMIC_LOCK
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 );
#else
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL );
#endif
    }

    return status;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .

Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
    KMP_POP_PARTITIONED_TIMER();

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t  *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t  *team     = this_thr -> th.th_team;
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_master_end)) {
        int  tid = __kmp_tid_from_gtid( global_tid );
        ompt_callbacks.ompt_callback(ompt_event_master_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check ) {
        if( global_tid < 0 )
            KMP_WARNING( ThreadIdentInvalid );

        // Only the master pushed the sync entry, so only the master pops it.
        if( KMP_MASTER_GTID( global_tid ))
            __kmp_pop_sync( global_tid, ct_master, loc );
    }
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

#if USE_ITT_BUILD
    __kmp_itt_ordered_prep( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        /* OMPT state update */
        th->th.ompt_thread_info.wait_id = (uint64_t) loc;
        th->th.ompt_thread_info.state = ompt_state_wait_ordered;

        /* OMPT event callback */
        if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

    // The dispatcher may install a custom "dispatch enter ordered" hook;
    // fall back to the generic implementation when none is set.
    if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
        (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_deo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        /* OMPT state update */
        th->th.ompt_thread_info.state = ompt_state_work_parallel;
        th->th.ompt_thread_info.wait_id = 0;

        /* OMPT event callback */
        if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );

#if USE_ITT_BUILD
    __kmp_itt_ordered_end( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

    // Mirror of __kmpc_ordered: use the custom "dispatch exit ordered" hook
    // when installed, otherwise the generic implementation.
    if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
        (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_dxo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
            th->th.ompt_thread_info.wait_id);
    }
#endif
}

#if KMP_USE_DYNAMIC_LOCK

// Lazily allocate and publish the indirect lock backing a critical section.
// The winning thread's lock pointer is CAS-published into *crit; a losing
// thread's allocation is intentionally left for cleanup at program exit.
static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
    // Pointer to the allocated indirect lock is written to crit, while indexing is ignored.
    void *idx;
    kmp_indirect_lock_t **lck;
    lck = (kmp_indirect_lock_t **)crit;
    kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
    KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
    KMP_SET_I_LOCK_LOCATION(ilk, loc);
    KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
    KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
    __kmp_itt_critical_creating(ilk->lock, loc);
#endif
    int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
    if (status == 0) {
#if USE_ITT_BUILD
        __kmp_itt_critical_destroyed(ilk->lock);
#endif
        // We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit.
        //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
    }
    KMP_DEBUG_ASSERT(*lck != NULL);
}

// Fast-path acquire tas lock
// (Definition continues on the following lines of the file: the test-and-set
// CAS, spin/yield loop, and backoff live in the remainder of this macro.)
#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) {                                                      \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                 \
    if (l->lk.poll != KMP_LOCK_FREE(tas) ||                                                     \
        !
KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ kmp_uint32 spins; \ KMP_FSYNC_PREPARE(l); \ KMP_INIT_YIELD(spins); \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ kmp_backoff_t backoff = __kmp_spin_backoff_params; \ while (l->lk.poll != KMP_LOCK_FREE(tas) || \ ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ __kmp_spin_backoff(&backoff); \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ } \ } \ KMP_FSYNC_ACQUIRED(l); \ } // Fast-path test tas lock #define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \ kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ rc = l->lk.poll == KMP_LOCK_FREE(tas) && \ KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \ } // Fast-path release tas lock #define KMP_RELEASE_TAS_LOCK(lock, gtid) { \ TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \ KMP_MB(); \ } #if KMP_USE_FUTEX # include <unistd.h> # include <sys/syscall.h> # ifndef FUTEX_WAIT # define FUTEX_WAIT 0 # endif # ifndef FUTEX_WAKE # define FUTEX_WAKE 1 # endif // Fast-path acquire futex lock #define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ kmp_int32 gtid_code = (gtid+1) << 1; \ KMP_MB(); \ KMP_FSYNC_PREPARE(ftx); \ kmp_int32 poll_val; \ while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \ KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \ kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \ if (!cond) { \ if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \ continue; \ } \ poll_val |= KMP_LOCK_BUSY(1, futex); \ } \ kmp_int32 rc; \ if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \ 
continue; \ } \ gtid_code |= 1; \ } \ KMP_FSYNC_ACQUIRED(ftx); \ } // Fast-path test futex lock #define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1 << 1, futex))) { \ KMP_FSYNC_ACQUIRED(ftx); \ rc = TRUE; \ } else { \ rc = FALSE; \ } \ } // Fast-path release futex lock #define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ KMP_MB(); \ KMP_FSYNC_RELEASING(ftx); \ kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \ if (KMP_LOCK_STRIP(poll_val) & 1) { \ syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \ } \ KMP_MB(); \ KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \ } #endif // KMP_USE_FUTEX #else // KMP_USE_DYNAMIC_LOCK static kmp_user_lock_p __kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid ) { kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit; // // Because of the double-check, the following load // doesn't need to be volatile. // kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); if ( lck == NULL ) { void * idx; // Allocate & initialize the lock. // Remember allocated locks in table in order to free them in __kmp_cleanup() lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section ); __kmp_init_user_lock_with_checks( lck ); __kmp_set_user_lock_location( lck, loc ); #if USE_ITT_BUILD __kmp_itt_critical_creating( lck ); // __kmp_itt_critical_creating() should be called *before* the first usage of underlying // lock. It is the only place where we can guarantee it. There are chances the lock will // destroyed with no usage, but it is not a problem, because this is not real event seen // by user but rather setting name for object (lock). See more details in kmp_itt.h. 
#endif /* USE_ITT_BUILD */ // // Use a cmpxchg instruction to slam the start of the critical // section with the lock pointer. If another thread beat us // to it, deallocate the lock, and use the lock that the other // thread allocated. // int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck ); if ( status == 0 ) { // Deallocate the lock and reload the value. #if USE_ITT_BUILD __kmp_itt_critical_destroyed( lck ); // Let ITT know the lock is destroyed and the same memory location may be reused for // another purpose. #endif /* USE_ITT_BUILD */ __kmp_destroy_user_lock_with_checks( lck ); __kmp_user_lock_free( &idx, gtid, lck ); lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); KMP_DEBUG_ASSERT( lck != NULL ); } } return lck; } #endif // KMP_USE_DYNAMIC_LOCK /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or some other suitably unique value. Enter code protected by a `critical` construct. This function blocks until the executing thread can enter the critical section. 
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
    // Dynamic-lock builds funnel through the hinted entry point with no hint.
    __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    KMP_TIME_PARTITIONED_BLOCK(OMP_critical_wait);  /* Time spent waiting to enter the critical section */
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    //TODO: add THR_OVHD_STATE

    KMP_CHECK_USER_LOCK_INIT();

    // Small lock kinds (tas/futex) fit inside the critical name itself;
    // larger kinds go through the lazily-allocated critical-section table.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KMP_START_EXPLICIT_TIMER(OMP_critical);
    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
#endif // KMP_USE_DYNAMIC_LOCK
}

#if KMP_USE_DYNAMIC_LOCK

// Converts the given hint to an internal lock implementation
static __forceinline kmp_dyna_lockseq_t
__kmp_map_hint_to_lock(uintptr_t hint)
{
#if KMP_USE_TSX
# define KMP_TSX_LOCK(seq) lockseq_##seq
#else
# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
# define KMP_CPUINFO_RTM (__kmp_cpuinfo.rtm)
#else
# define KMP_CPUINFO_RTM 0
#endif

    // Hints that do not require further logic
    if (hint & kmp_lock_hint_hle)
        return KMP_TSX_LOCK(hle);
    if (hint & kmp_lock_hint_rtm)
        return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq;
    if (hint & kmp_lock_hint_adaptive)
        return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq;

    // Rule out conflicting hints first by returning the default lock
    if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
        return __kmp_user_lock_seq;
    if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative))
        return __kmp_user_lock_seq;

    // Do not even consider speculation when it appears to be contended
    if (hint & omp_lock_hint_contended)
        return lockseq_queuing;

    // Uncontended lock without speculation
    if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
        return lockseq_tas;

    // HLE lock for speculation
    if (hint & omp_lock_hint_speculative)
        return KMP_TSX_LOCK(hle);

    return __kmp_user_lock_seq;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.
@param hint the lock hint.

Enter code protected by a `critical` construct with a hint.
The hint value is used to suggest a lock implementation. This function blocks until the executing thread
can enter the critical section unless the hint suggests use of speculative execution and the hardware supports it.
*/
void
__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint )
{
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    if (*lk == 0) {
        kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
        if (KMP_IS_D_LOCK(lckseq)) {
            // Direct lock: the tag is stored inline in the critical name word.
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
        } else {
            // Indirect lock: allocate the lock object and publish its pointer.
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KMP_PUSH_PARTITIONED_TIMER(OMP_critical);
    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical_with_hint

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

#if KMP_USE_DYNAMIC_LOCK
    // The lock location mirrors the acquire side: direct locks live inline in
    // the critical name; indirect locks are reached through the stored pointer.
    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        KMP_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_RELEASE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        KMP_ASSERT(ilk != NULL);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
        KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // Same lock-kind selection as the acquire in __kmpc_critical.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else
    { // ticket, queuing or drdpa
        lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
    }

    KMP_ASSERT(lck != NULL);

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

#if USE_ITT_BUILD
    __kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_release_user_lock_with_checks( lck, global_tid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
        ompt_callbacks.ompt_callback(ompt_event_release_critical)(
            (uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK

    KMP_POP_PARTITIONED_TIMER();
    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int status;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check )
        __kmp_check_barrier( global_tid, ct_barrier, loc );

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    // __kmp_barrier returns non-zero only for the thread elected to run the
    // master block; invert that into the 1 = "execute" convention.
    status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

    return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the barrier has already
been released by the time this function returns.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())        */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit   */

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );

    if (rc) {
        // We are going to execute the single statement, so we should count it.
        KMP_COUNT_BLOCK(OMP_SINGLE);
        KMP_PUSH_PARTITIONED_TIMER(OMP_single);
    }

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team     = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled) {
        if (rc) {
            if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
                    team->t.ompt_team_info.microtask);
            }
        } else {
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
            }
            this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
        }
    }
#endif

    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );
    KMP_POP_PARTITIONED_TIMER();

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team     = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif
}

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
    KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
        ompt_callbacks.ompt_callback(ompt_event_loop_end)(
            team_info->parallel_id, task_info->task_id);
    }
#endif

    if ( __kmp_env_consistency_check )
        __kmp_pop_workshare( global_tid, ct_pdo, loc );
}

/*
 * User routines which take C-style arguments (call by value)
 * different from the Fortran equivalent routines
 */
void
ompc_set_num_threads( int arg )
{
// !!!!! TODO: check the per-task binding
    __kmp_set_num_threads( arg, __kmp_entry_gtid() );
}

void
ompc_set_dynamic( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private implementation of the internal controls */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__dynamic( thread, flag ? TRUE : FALSE );
}

void
ompc_set_nested( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private internal controls implementation */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__nested( thread, flag ? TRUE : FALSE );
}

void
ompc_set_max_active_levels( int max_active_levels )
{
    /* TO DO */
    /* we want per-task implementation of this internal control */

    /* For the per-thread internal controls implementation */
    __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}

void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
// !!!!! TODO: check the per-task binding
    __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}

// Thin C-convention wrapper over the internal ancestor-thread-number query.
int
ompc_get_ancestor_thread_num( int level )
{
    return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}

// Thin C-convention wrapper over the internal team-size query.
int
ompc_get_team_size( int level )
{
    return __kmp_get_team_size( __kmp_entry_gtid(), level );
}

void
kmpc_set_stacksize( int arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_stacksize_s( size_t arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_blocktime( int arg )
{
    int gtid, tid;
    kmp_info_t *thread;

    gtid = __kmp_entry_gtid();
    tid = __kmp_tid_from_gtid(gtid);
    thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime( arg, thread, tid );
}

void
kmpc_set_library( int arg )
{
    // __kmp_user_set_library initializes the library if needed
    __kmp_user_set_library( (enum library_type)arg );
}

void
kmpc_set_defaults( char const * str )
{
    // __kmp_aux_set_defaults initializes the library if needed
    __kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}

void
kmpc_set_disp_num_buffers( int arg )
{
    // ignore after initialization because some teams have already
    // allocated dispatch buffers
    if( __kmp_init_serial == 0 && arg > 0 )
        __kmp_dispatch_num_buffers = arg;
}

// Affinity-mask wrappers: return -1 where affinity is unsupported (stub
// builds), otherwise ensure middle initialization and delegate.
int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}

/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc       source location information
@param gtid      global thread number
@param cpy_size  size of the cpy_data buffer
@param cpy_data  pointer to data to be copied
@param cpy_func  helper function to call for copying data
@param didit     flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer.

The cpy_func routine used for the copy and the contents of the data area defined by
cpy_data and cpy_size may be built in any fashion that will allow the copy to be done.
For instance, the cpy_data buffer can hold the actual data to be copied or it may hold a
list of pointers to the data. The cpy_func routine must interpret the cpy_data buffer
appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
    void **data_ptr;

    KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

    KMP_MB();

    data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid );
        }
    }

    /* ToDo: Optimize the following two barriers into some kind of split barrier */

    // Publish the single thread's buffer address, then barrier so all threads
    // see it before they copy from it.
    if (didit) *data_ptr = cpy_data;

    /* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );

    if (! didit) (*cpy_func)( cpy_data, *data_ptr );

    /* Consider next barrier the user-visible barrier for barrier region boundaries */
    /* Nesting checks are already handled by the single construct checks */

#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}

/* -------------------------------------------------------------------------- */

#define INIT_LOCK                 __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK          __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK              __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED        __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK       __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK              __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK       __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK                 __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK          __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK              __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK       __kmp_destroy_nested_user_lock_with_checks

/*
 * TODO: Make check abort messages use location info & pass it
 * into with_checks routines
 */

#if KMP_USE_DYNAMIC_LOCK

// internal lock initializer
static __forceinline void
__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
    if (KMP_IS_D_LOCK(seq)) {
        KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
        __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
    } else {
        KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
        kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
        __kmp_itt_lock_creating(ilk->lock, loc);
#endif
    }
}

// internal nest lock initializer
// (Maps a flat lock sequence to its nested counterpart before initializing.)
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
#if KMP_USE_TSX
    // Don't have nested lock implementation for speculative locks
    if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
        seq = __kmp_user_lock_seq;
#endif
    switch (seq) {
        case lockseq_tas:
            seq = lockseq_nested_tas;
            break;
#if KMP_USE_FUTEX
        case lockseq_futex:
            seq = lockseq_nested_futex;
            break;
#endif
        case lockseq_ticket:
            seq = lockseq_nested_ticket;
            break;
        case lockseq_queuing:
            seq = lockseq_nested_queuing;
            break;
        case lockseq_drdpa:
            seq = lockseq_nested_drdpa;
            break;
        default:
            seq = lockseq_nested_queuing;
    }
    KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
    __kmp_itt_lock_creating(ilk->lock, loc);
#endif
}

/* initialize the lock with a hint */
void
__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
    }

    __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

/* initialize the lock with a hint */
void
__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
    }

    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

#endif // KMP_USE_DYNAMIC_LOCK

/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid,  void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock");
    }
    __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#else // KMP_USE_DYNAMIC_LOCK

    static char const * const func = "omp_init_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    // Small lock kinds fit inside the user's omp_lock_t; otherwise allocate.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_init_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_init_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock

/* initialize the lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
    }
    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#else // KMP_USE_DYNAMIC_LOCK

    static char const * const func = "omp_init_nest_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    // Nested locks additionally store a depth counter, so the size check
    // includes depth_locked when deciding whether the lock fits inline.
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
      + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex )
     && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
     <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_NESTED_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock

void
__kmpc_destroy_lock( ident_t * loc, kmp_int32
gtid, void ** user_lock ) { #if KMP_USE_DYNAMIC_LOCK # if USE_ITT_BUILD kmp_user_lock_p lck; if (KMP_EXTRACT_D_TAG(user_lock) == 0) { lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock; } else { lck = (kmp_user_lock_p)user_lock; } __kmp_itt_lock_destroyed(lck); # endif KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock); #else kmp_user_lock_p lck; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" ); } #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_destroy_lock)) { ompt_callbacks.ompt_callback(ompt_event_destroy_lock)((uint64_t) lck); } #endif #if USE_ITT_BUILD __kmp_itt_lock_destroyed( lck ); #endif /* USE_ITT_BUILD */ DESTROY_LOCK( lck ); if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { ; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { ; } #endif else { __kmp_user_lock_free( user_lock, gtid, lck ); } #endif // KMP_USE_DYNAMIC_LOCK } // __kmpc_destroy_lock /* destroy the lock */ void __kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { #if KMP_USE_DYNAMIC_LOCK # if USE_ITT_BUILD kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock); __kmp_itt_lock_destroyed(ilk->lock); # endif KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock); #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && 
( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" ); } #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)) { ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)((uint64_t) lck); } #endif #if USE_ITT_BUILD __kmp_itt_lock_destroyed( lck ); #endif /* USE_ITT_BUILD */ DESTROY_NESTED_LOCK( lck ); if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { ; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { ; } #endif else { __kmp_user_lock_free( user_lock, gtid, lck ); } #endif // KMP_USE_DYNAMIC_LOCK } // __kmpc_destroy_nest_lock void __kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { KMP_COUNT_BLOCK(OMP_set_lock); #if KMP_USE_DYNAMIC_LOCK int tag = KMP_EXTRACT_D_TAG(user_lock); # if USE_ITT_BUILD __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object. 
# endif # if KMP_USE_INLINED_TAS if (tag == locktag_tas && !__kmp_env_consistency_check) { KMP_ACQUIRE_TAS_LOCK(user_lock, gtid); } else # elif KMP_USE_INLINED_FUTEX if (tag == locktag_futex && !__kmp_env_consistency_check) { KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid); } else # endif { __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid); } # if USE_ITT_BUILD __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); # endif #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_acquiring( lck ); #endif /* USE_ITT_BUILD */ ACQUIRE_LOCK( lck, gtid ); #if USE_ITT_BUILD __kmp_itt_lock_acquired( lck ); #endif /* USE_ITT_BUILD */ #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_acquired_lock)) { ompt_callbacks.ompt_callback(ompt_event_acquired_lock)((uint64_t) lck); } #endif #endif // KMP_USE_DYNAMIC_LOCK } void __kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { #if KMP_USE_DYNAMIC_LOCK # if USE_ITT_BUILD __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); # endif KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid); # if USE_ITT_BUILD __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); #endif #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { // missing support here: need to know whether acquired first or not } #endif #else // KMP_USE_DYNAMIC_LOCK int acquire_status; kmp_user_lock_p lck; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( 
__kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_acquiring( lck ); #endif /* USE_ITT_BUILD */ ACQUIRE_NESTED_LOCK( lck, gtid, &acquire_status ); #if USE_ITT_BUILD __kmp_itt_lock_acquired( lck ); #endif /* USE_ITT_BUILD */ #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) { if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)) ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)((uint64_t) lck); } else { if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)) ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)((uint64_t) lck); } } #endif #endif // KMP_USE_DYNAMIC_LOCK } void __kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) { #if KMP_USE_DYNAMIC_LOCK int tag = KMP_EXTRACT_D_TAG(user_lock); # if USE_ITT_BUILD __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock); # endif # if KMP_USE_INLINED_TAS if (tag == locktag_tas && !__kmp_env_consistency_check) { KMP_RELEASE_TAS_LOCK(user_lock, gtid); } else # elif KMP_USE_INLINED_FUTEX if (tag == locktag_futex && !__kmp_env_consistency_check) { KMP_RELEASE_FUTEX_LOCK(user_lock, gtid); } else # endif { __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid); } #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; /* Can't use serial interval since not block structured */ /* release the lock */ if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) // "fast" path implemented to fix customer performance issue #if USE_ITT_BUILD __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock ); #endif /* USE_ITT_BUILD */ 
TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0); KMP_MB(); return; #else lck = (kmp_user_lock_p)user_lock; #endif } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_releasing( lck ); #endif /* USE_ITT_BUILD */ RELEASE_LOCK( lck, gtid ); #if OMPT_SUPPORT && OMPT_BLAME if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_release_lock)) { ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck); } #endif #endif // KMP_USE_DYNAMIC_LOCK } /* release the lock */ void __kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) { #if KMP_USE_DYNAMIC_LOCK # if USE_ITT_BUILD __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock); # endif KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid); #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; /* Can't use serial interval since not block structured */ if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) // "fast" path implemented to fix customer performance issue kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock; #if USE_ITT_BUILD __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock ); #endif /* USE_ITT_BUILD */ if ( --(tl->lk.depth_locked) == 0 ) { TCW_4(tl->lk.poll, 0); } KMP_MB(); return; #else lck = (kmp_user_lock_p)user_lock; #endif } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_releasing( lck ); #endif /* 
USE_ITT_BUILD */ int release_status; release_status = RELEASE_NESTED_LOCK( lck, gtid ); #if OMPT_SUPPORT && OMPT_BLAME if (ompt_enabled) { if (release_status == KMP_LOCK_RELEASED) { if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) { ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)( (uint64_t) lck); } } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) { ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)( (uint64_t) lck); } } #endif #endif // KMP_USE_DYNAMIC_LOCK } /* try to acquire the lock */ int __kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) { KMP_COUNT_BLOCK(OMP_test_lock); #if KMP_USE_DYNAMIC_LOCK int rc; int tag = KMP_EXTRACT_D_TAG(user_lock); # if USE_ITT_BUILD __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); # endif # if KMP_USE_INLINED_TAS if (tag == locktag_tas && !__kmp_env_consistency_check) { KMP_TEST_TAS_LOCK(user_lock, gtid, rc); } else # elif KMP_USE_INLINED_FUTEX if (tag == locktag_futex && !__kmp_env_consistency_check) { KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc); } else # endif { rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid); } if (rc) { # if USE_ITT_BUILD __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); # endif return FTN_TRUE; } else { # if USE_ITT_BUILD __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock); # endif return FTN_FALSE; } #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; int rc; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_acquiring( lck ); #endif /* USE_ITT_BUILD */ rc = TEST_LOCK( lck, gtid ); #if USE_ITT_BUILD if ( rc ) { __kmp_itt_lock_acquired( lck ); } else { 
__kmp_itt_lock_cancelled( lck ); } #endif /* USE_ITT_BUILD */ return ( rc ? FTN_TRUE : FTN_FALSE ); /* Can't use serial interval since not block structured */ #endif // KMP_USE_DYNAMIC_LOCK } /* try to acquire the lock */ int __kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) { #if KMP_USE_DYNAMIC_LOCK int rc; # if USE_ITT_BUILD __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); # endif rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid); # if USE_ITT_BUILD if (rc) { __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); } else { __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock); } # endif return rc; #else // KMP_USE_DYNAMIC_LOCK kmp_user_lock_p lck; int rc; if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" ); } #if USE_ITT_BUILD __kmp_itt_lock_acquiring( lck ); #endif /* USE_ITT_BUILD */ rc = TEST_NESTED_LOCK( lck, gtid ); #if USE_ITT_BUILD if ( rc ) { __kmp_itt_lock_acquired( lck ); } else { __kmp_itt_lock_cancelled( lck ); } #endif /* USE_ITT_BUILD */ return rc; /* Can't use serial interval since not block structured */ #endif // KMP_USE_DYNAMIC_LOCK } /*--------------------------------------------------------------------------------------------------------------------*/ /* * Interface to fast scalable reduce methods routines */ // keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions; // another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then) // AT: which solution is better? 
// Store/fetch the reduction method chosen for the current reduce construct in
// the calling thread's local state (see __kmpc_reduce*/__kmpc_end_reduce*).
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
        ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
        ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

// used in a critical section reduce block: lazily initializes the lock stored
// in (or pointed to by) `crit`, then acquires it
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    // this lock was visible to a customer and to the threading profile tool as a serial overhead span
    //            (although it's used for an internal purpose only)
    // why was it visible in previous implementation?
    // should we keep it visible in new reduce block?
    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    // NOTE(review): initialization is claimed via CAS for direct locks;
    // concurrent first entry appears to be resolved by that CAS — confirm.
    if (*lk == 0) {
        if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
        } else {
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers
    // with 32 byte critical sections. If there isn't enough space, then we
    // have to use a pointer.
    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    }
    else {
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block: releases the lock acquired by
// __kmp_enter_critical_section_reduce_block (and pops the consistency stack)
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    } else {
        lck = (kmp_user_lock_p) crit;
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block

/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck ) {

    KMP_COUNT_BLOCK(REDUCE_nowait);

    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            // (state is restored below under `teams_swapped`)
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    //     (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        //     (it's not quite good, because the checking block has been closed by this 'pop',
        //      but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //AT: performance issue: a real barrier here
        //AT:     (if master goes slow, other threads are blocked here waiting for the master to come and release them)
        //AT:     (it's not what a customer might expect specifying NOWAIT clause)
        //AT:     (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
        //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
        //        and be more in line with sense of NOWAIT
        //AT: TO DO: do epcc test and compare times

        // this barrier should be invisible to a customer and to the threading profile tool
        //              (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers will get to __kmpc_end_reduce_nowait() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) {
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // recover the method chosen by the matching __kmpc_reduce_nowait() call
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( on Intel platforms only )

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // neither master nor other workers should get here
        //     (code gen does not generate this call in case 2: atomic reduce block)
        // actually it's better to remove this elseif at all;
        // after removal this value will checked by the 'else' and will assert

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master gets here

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

/* 2.a.ii. Reduce Block with a terminating barrier */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);

    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //case tree_reduce_block:
        // this barrier should be visible to a customer and to the threading profile tool
        //              (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        // ( none of other workers except master will enter __kmpc_end_reduce() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) { // 0: all other workers; 1: master
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck )
{
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    // Recover the method chosen by __kmpc_reduce() for this thread.
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the threading profile tool
    // (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master executes here (master releases all other workers)
        __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

// Returns the id of the current explicit task, or 0 when called outside the
// runtime (no valid gtid).
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32 gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

// Returns the id of the parent of the current explicit task, or 0 when there
// is no parent task or no valid gtid.
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32 gtid;
    kmp_info_t * thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

// Record the requested thread placement (sockets/cores/threads-per-core and
// offsets) in the runtime's global placement variables.
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_place_num_sockets = nS;
    __kmp_place_socket_offset = sO;
    __kmp_place_num_cores = nC;
    __kmp_place_core_offset = cO;
    __kmp_place_num_threads_per_core = nT;
}

#if OMP_45_ENABLED
/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.
@param num_dims  number of associated doacross loops.
@param dims  info on loops bounds.

Initialize doacross loop information.
Expect compiler send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
    int j, idx;
    kmp_int64 last, trace_count;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_uint32 *flags;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    dispatch_shared_info_t *sh_buf;

    KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
                 gtid, num_dims, !team->t.t_serialized));
    KMP_DEBUG_ASSERT(dims != NULL);
    KMP_DEBUG_ASSERT(num_dims > 0);

    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
    idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop
    sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];

    // Save bounds info into allocated private buffer.
    // Layout: [0]=num_dims, [1]=&num_done, [2..4]=lo/up/st of dim 0,
    // then (range_length, lo, up, st) quadruples for dims 1..num_dims-1.
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
    pr_buf->th_doacross_info =
        (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
    // Save also address of num_done in order to access it later without knowing the buffer index
    pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
    pr_buf->th_doacross_info[2] = dims[0].lo;
    pr_buf->th_doacross_info[3] = dims[0].up;
    pr_buf->th_doacross_info[4] = dims[0].st;
    last = 5;
    for( j = 1; j < num_dims; ++j ) {
        kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0]
        if( dims[j].st == 1 ) { // most common case
            // AC: should we care of ranges bigger than LLONG_MAX? (not for now)
            range_length = dims[j].up - dims[j].lo + 1;
        } else {
            if( dims[j].st > 0 ) {
                KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
                range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
            } else {            // negative increment
                KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
                range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
            }
        }
        pr_buf->th_doacross_info[last++] = range_length;
        pr_buf->th_doacross_info[last++] = dims[j].lo;
        pr_buf->th_doacross_info[last++] = dims[j].up;
        pr_buf->th_doacross_info[last++] = dims[j].st;
    }

    // Compute total trip count.
    // Start with range of dims[0] which we don't need to keep in the buffer.
    if( dims[0].st == 1 ) { // most common case
        trace_count = dims[0].up - dims[0].lo + 1;
    } else if( dims[0].st > 0 ) {
        KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
        trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
    } else {   // negative increment
        KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
        trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
    }
    for( j = 1; j < num_dims; ++j ) {
        trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
    }
    KMP_DEBUG_ASSERT(trace_count > 0);

    // Check if shared buffer is not occupied by other loop (idx - __kmp_dispatch_num_buffers)
    if( idx != sh_buf->doacross_buf_idx ) {
        // Shared buffer is occupied, wait for it to be free
        __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
    }
    // Check if we are the first thread. After the CAS the first thread gets 0,
    // others get 1 if initialization is in progress, allocated pointer otherwise.
    flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
        (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
    if( flags == NULL ) {
        // we are the first thread, allocate the array of flags
        kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
        sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
    } else if( (kmp_int64)flags == 1 ) {
        // initialization is still in progress, need to wait
        while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
            KMP_YIELD(TRUE);
        }
    }
    KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
    pr_buf->th_doacross_flags = sh_buf->doacross_flags;      // save private copy in order to not
                                                             // touch shared buffer on each iteration
    KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}

// Block until the source iteration identified by vec (one value per loop
// dimension) has been posted via __kmpc_doacross_post().
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, up, st;

    KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number and check out-of-bounds condition
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    up = pr_buf->th_doacross_info[3];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else {        // negative increment
        if( vec[0] > lo || vec[0] < up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Collapse the remaining dimensions into a single linear iteration number
    // using the range lengths kept by __kmpc_doacross_init().
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        up = pr_buf->th_doacross_info[j + 3];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else {   // st < 0
            if( vec[i] > lo || vec[i] < up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    // Spin-yield until the corresponding bit is set by the posting thread.
    // NOTE(review): th_doacross_flags is read without volatile/atomic here —
    // relies on KMP_YIELD acting as a compiler barrier; confirm upstream intent.
    while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
        KMP_YIELD(TRUE);
    }
    KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
                 gtid, (iter_number<<5)+shft));
}

// Mark the iteration identified by vec as completed so dependent sink
// iterations waiting in __kmpc_doacross_wait() may proceed.
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, st;

    KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks)
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else {        // negative increment
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else {   // st < 0
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    // Only issue the atomic OR when the bit is not already set.
    if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 )
        KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag );
    KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n",
                 gtid, (iter_number<<5)+shft));
}

// Per-thread teardown for a doacross loop; the last thread to finish also
// frees the shared flag array and recycles the shared dispatch buffer.
void
__kmpc_doacross_fini(ident_t *loc, int gtid)
{
    kmp_int64 num_done;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf = th->th.th_dispatch;

    KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team));
        return; // nothing to do
    }
    num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1;
    if( num_done == th->th.th_team_nproc ) {
        // we are the last thread, need to free shared resources
        int idx = pr_buf->th_doacross_buf_idx - 1;
        dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
        KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
        __kmp_thread_free(th, (void*)sh_buf->doacross_flags);
        sh_buf->doacross_flags = NULL;
        sh_buf->doacross_num_done = 0;
        sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use
    }
    // free private resources (need to keep buffer index forever)
    __kmp_thread_free(th, (void*)pr_buf->th_doacross_info);
    pr_buf->th_doacross_info = NULL;
    KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid));
}

#endif

// end of file //
rnn_helpers.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

// Shared helpers for the CPU RNN/LSTM/GRU kernels: allocation, input
// validation, sequence reversal, bounds-checked GEMM, span-to-pointer
// conversion, thread-pool dispatch, and the low-level gate math in deepcpu.

#pragma once

#ifdef _WIN32
#pragma warning(disable : 4267)
#endif

#include <algorithm>
#include <functional>
#include <future>
#include <string>
#include <vector>

#include "gsl/span"
#include "gsl/gsl_algorithm"

#include "core/common/common.h"
#include "core/common/logging/logging.h"
#include "core/framework/allocator.h"
#include "core/util/math.h"
#include "core/util/math_cpuonly.h"
#include "core/platform/threadpool.h"

namespace onnxruntime {
class Tensor;
class OpKernelContext;

namespace rnn {
namespace detail {

enum Direction {
  kForward = 0,
  kReverse = 1,
  kBidirectional = 2
};

// Map the ONNX 'direction' attribute string to the Direction enum;
// throws for any other value.
inline Direction MakeDirection(const std::string& direction) {
  if (direction == "forward") {
    return kForward;
  }
  if (direction == "reverse") {
    return kReverse;
  }
  if (direction == "bidirectional") {
    return kBidirectional;
  }
  ORT_THROW("Invalid 'direction' argument of '", direction,
            "'. Must be one of 'forward', 'reverse', or 'bidirectional'.");
}

/** Allocate a unique_ptr using allocator_, and return a span to the allocated memory so usage is safe
@param allocator IAllocator to use for the allocation.
@param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'.
@param unique_ptr unique_ptr that will control the lifetime of the allocated memory.
@param fill If true, fill the allocated memory with fill_value.
@param fill_value Value to use if 'fill' is true.
@returns A span to provide bounds checked access to the allocated memory.
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
                           size_t size,
                           IAllocatorUniquePtr<TAlloc>& unique_ptr,
                           bool fill = false, TAlloc fill_value = TAlloc{}) {
  unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);
  auto span = gsl::make_span(unique_ptr.get(), size);

  if (fill) {
    // Do't use span.begin() it will cause performance issue and stop compiler to optimize the code
    std::fill_n(unique_ptr.get(), size, fill_value);
  }

  return span;
}

// validate the common inputs to RNN, LSTM and GRU operators
Status ValidateCommonRnnInputs(const Tensor& X,
                               const Tensor& W,
                               const Tensor& R,
                               const Tensor* B,
                               int WRB_dim_1_multipler,  // multiplier used with hidden_size for W, R and B inputs
                               const Tensor* sequence_lens,
                               const Tensor* initial_h,
                               int64_t num_directions,
                               int64_t hidden_size);

/// Copy an input array repeatedly to an output array
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator
/// @param repetitions Number of times to repeat copy. Assumes output is sufficiently sized.
/// @returns Position of output iterator after copy is completed
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  for (int64_t i = 0; i < repetitions; i++) {
    output = std::copy(input_begin, input_end, output);
  }

  return output;
}

// reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size]
// and output to shape [seq_length, num_directions, batch_size, hidden_size]
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
                     gsl::span<T> inputs_reverse,
                     gsl::span<const int> sequence_lengths,
                     const int max_sequence_length,
                     const int batch_size,
                     const int input_size,
                     const int num_directions) {
  for (int i = 0; i < batch_size; i++) {
    int seq_len = sequence_lengths[i];
#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    // Reverse the first seq_len entries for this batch item.
    for (int j = 0; j < seq_len; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }
#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    // Copy the padding entries (past seq_len) through unreversed.
    for (int j = seq_len; j < max_sequence_length; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }
  }
}

// A has size M x K, B has size N x K (transposed), and C has size M x N
// We check that A, B and C are large enough before calling the lower level GEMM implementation
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
                 const int N,
                 const int K,
                 const float alpha,
                 TSpanAIter A,
                 TSpanAIter A_end,
                 const int lda,
                 TSpanBIter B,
                 TSpanBIter B_end,
                 const int ldb,
                 const float beta,
                 TSpanCIter C,
                 TSpanCIter C_end,
                 const int ldc,
                 concurrency::ThreadPool* tp) {
  // validate all the inputs
  // need to use the lda/ldb/ldc strides which should be >= the columns for the span
  ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
  ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
  ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
  ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);

  ::onnxruntime::math::GemmEx<float>(
      CblasNoTrans, CblasTrans,
      M, N, K, alpha,
      &*A, lda,
      &*B, ldb, beta,
      &*C, ldc, tp);
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
                             typename gsl::span<T>::const_iterator end,
                             size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// NOTE(review): unlike SafeRawPointer below, this returns span.data() without
// adding 'offset' — presumably callers always pass offset == 0; verify.
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data();
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
                  typename gsl::span<T>::iterator end,
                  size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data() + offset;
}

// Run lambda(i) for i = 0, step, 2*step, ... < max on the thread pool and
// busy-wait until all tasks have completed (or run serially under NOTHREADS).
template <typename TLambda>
void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step,
                             onnxruntime::concurrency::ThreadPool& ttp,
                             const ::onnxruntime::logging::Logger& logger) {
  // #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug
#ifdef NOTHREADS
  ORT_UNUSED_PARAMETER(ttp);
  ORT_UNUSED_PARAMETER(logger);

  for (int i = 0; i < max; i += step) {
    (void)name;
    std::bind(lambda, i)();
  }
#else
  ORT_UNUSED_PARAMETER(name);
  ORT_UNUSED_PARAMETER(logger);

  std::atomic<int> done(0);
  for (int i = 0; i < max; i += step) {
    ttp.Schedule([lambda, i, &done]() {
      lambda(i);
      ++done;
    });
  }

  // NOTE(review): 'max % step' is evaluated even when step <= 0 — callers
  // presumably always pass step > 0 (step == 0 would also loop forever above).
  int totalTasks = max / (step > 0 ? step : 1) + (max % step > 0 ? 1 : 0);
  while (done != totalTasks)
    ;  // spin-wait; all scheduled tasks must finish before returning
#endif
}

void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
                    int offset = 0, int col_width = -1);

// Helper class to wrap the processing of the activation funcs and any alpha/beta values.
// The alpha/beta values are consumed in the order of the activation funcs. once they run out
// defaults will be used as needed.
// The Entries property contains the normalized function names and the alpha/beta value to use.
class ActivationFuncs {
 public:
  struct Entry {
    const std::string name;
    const float alpha;
    const float beta;
  };

  ActivationFuncs() = default;

  ActivationFuncs(const std::vector<std::string>& funcs,
                  const std::vector<float>& alphas,
                  const std::vector<float>& betas);

  const std::vector<Entry>& Entries() const {
    return entries_;
  }

 private:
  std::vector<Entry> entries_;
};

namespace deepcpu {

using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, int, float, float);
using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float);

// Lookup of gate/activation implementations by normalized function name.
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);

void add_bias_into_ignore(const float* ignored, const float* pd, int c);
void add_bias_into(const float* ps, float* pd, int c);
void clip(float b, float* pd, int c);
void clip_add_bias(float b, const float* pb, float* pd, int c);
void clip_ignore_bias(float b, const float* pb, float* pd, int c);

void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid(float* pd, int c, float alpha, float beta);
void tanh(float* pd, int c, float alpha, float beta);
void relu(float* pd, int c, float alpha, float beta);
void sigmoid_exact(float* pd, int c, float alpha, float beta);
void tanh_exact(float* pd, int c, float alpha, float beta);
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf,
                                const float* pg, float* pcurr, int c);
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);

// dest[i] += op1[i] * op2[i] (accumulating elementwise product)
inline void elementwise_product(const float* op1, const float* op2,
                                float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += op1[i] * op2[i];
}

// dest[i] += src[i]
inline void elementwise_sum1(const float* src, float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src[i];
}

// dest[i] += src1[i] + src2[i]
inline void elementwise_sum2(const float* src1, const float* src2,
                             float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src1[i] + src2[i];
}

}  // namespace deepcpu
}  // namespace detail
}  // namespace rnn
}  // namespace onnxruntime
pzgeswp.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include <plasma_core_blas.h>

#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/******************************************************************************/
// Parallel swap of rows (PlasmaRowwise) or columns (PlasmaColumnwise) of the
// tiled matrix A according to the pivot vector ipiv, as OpenMP tasks.
// The empty "int l = 1; l++;" tasks exist only to funnel dependences: they
// make the whole panel depend on each of its tiles before the swap, and each
// tile depend on the panel afterwards (a multidependency workaround, since a
// task cannot depend on a variable number of tiles directly).
void plasma_pzgeswp(plasma_enum_t colrow, plasma_desc_t A,
                    int *ipiv, int incx,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    if (colrow == PlasmaRowwise) {
        for (int n = 0; n < A.nt; n++) {
            plasma_complex64_t *a00, *a10;
            a00 = A(0, n);
            a10 = A(A.mt-1, n);

            // Multidependency of the whole panel on its individual tiles.
            for (int m = 1; m < A.mt-1; m++) {
                plasma_complex64_t *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    int l = 1; l++;
                }
            }

            int ma00 = (A.mt-1)*A.mb;
            int na00 = plasma_tile_nmain(A, n);
            int lda10 = plasma_tile_mmain(A, A.mt-1);
            int nva10 = plasma_tile_nview(A, n);
            // Swap the rows of the whole column panel in one task.
            #pragma omp task depend (in:ipiv[0:A.m]) \
                             depend (inout:a00[0:ma00*na00]) \
                             depend (inout:a10[0:lda10*nva10])
            {
                int nvan = plasma_tile_nview(A, n);
                plasma_desc_t view = plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                plasma_core_zgeswp(colrow, view, 1, A.m, ipiv, incx);
            }

            // Multidependency of individual tiles on the whole panel.
            for (int m = 1; m < A.mt-1; m++) {
                plasma_complex64_t *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    int l = 1; l++;
                }
            }
        }
    }
    else { // PlasmaColumnwise
        for (int m = 0; m < A.mt; m++) {
            plasma_complex64_t *a00, *a01;
            a00 = A(m, 0);
            a01 = A(m, A.nt-1);

            // Multidependency of the whole (row) panel on its individual tiles.
            for (int n = 1; n < A.nt-1; n++) {
                plasma_complex64_t *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    int l = 1; l++;
                }
            }

            // Swap the columns of the whole row panel in one task.
            #pragma omp task depend (in:ipiv[0:A.n]) \
                             depend (inout:a00[0]) \
                             depend (inout:a01[0])
            {
                int mvam = plasma_tile_mview(A, m);
                plasma_desc_t view = plasma_desc_view(A, m*A.mb, 0, mvam, A.n);
                plasma_core_zgeswp(colrow, view, 1, A.n, ipiv, incx);
            }

            // Multidependency of individual tiles on the whole (row) panel.
            for (int n = 1; n < A.nt-1; n++) {
                plasma_complex64_t *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    int l = 1; l++;
                }
            }
        }
    }
}
serial_tree_learner.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>

#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

namespace LightGBM {

using json11::Json;

/*! \brief forward declaration */
class CostEfficientGradientBoosting;

/*!
 * \brief Used for learning a tree by single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  void ResetTrainingData(const Dataset* train_data, bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data, bool is_constant_hessian, bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  // Keep a pointer to the forced-split JSON unless it is null/empty.
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;

  // subset == nullptr means bagging by index only; otherwise reset to the
  // subset dataset and remember the bagging indices in the shared state.
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->is_use_subrow = false;
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->is_use_subrow = true;
      share_state_->is_subrow_copied = false;
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  // Add each leaf's output to the scores of the data rows assigned to that
  // leaf; leaves are processed in parallel (rows of distinct leaves are
  // disjoint, so the writes do not overlap).
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) {
      return;
    }
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index,
                                  int real_fidx, bool is_feature_used, int num_data,
                                  const LeafSplits* leaf_splits, SplitInfo* best_split, double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);

  void RecomputeBestSplitForLeaf(int leaf, SplitInfo* split);

  /*!
   * \brief Some initial works before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial works before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);

  /*!
   * \brief Partition tree and data according best split.
   * \param tree Current tree, will be splitted on this function.
   * \param best_leaf The index of leaf that will be splitted.
   * \param left_leaf The index of left leaf after splitted.
   * \param right_leaf The index of right leaf after splitted.
   */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
                  bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf, int* cur_depth);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
  // NOTE(review): '#elif USE_CUDA' evaluates the macro's value; if USE_CUDA is
  // ever defined empty this fails to preprocess — '#elif defined(USE_CUDA)'
  // would be safer. Confirm against the build flags before changing.
#elif USE_CUDA
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif

  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  ColSampler col_sampler_;
  const Json* forced_split_json_;
  std::unique_ptr<TrainingShareStates> share_state_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
p4.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NRA 620                 /* number of rows in matrix A */
#define NCA 150                 /* number of columns in matrix A */
#define NCB 70                  /* number of columns in matrix B */

/*
 * Multiply A (NRA x NCA) by B (NCA x NCB) into C with an OpenMP-parallel
 * triple loop, then print the result matrix and the elapsed wall time.
 *
 * The multiply loop is manually unrolled by a factor of 2 in both i and k,
 * so NRA and NCA must be even (620 and 150 both are).
 */
int main (int argc, char *argv[])
{
  double a[NRA][NCA],           /* matrix A to be multiplied */
         b[NCA][NCB],           /* matrix B to be multiplied */
         c[NRA][NCB];           /* result matrix C */

  /*** Initialize matrices ***/
  for (int i=0; i<NRA; i++)
    for (int j=0; j<NCA; j++)
      a[i][j]= i+j;
  for (int i=0; i<NCA; i++)
    for (int j=0; j<NCB; j++)
      b[i][j]= i*j;
  for (int i=0; i<NRA; i++)
    for (int j=0; j<NCB; j++)
      c[i][j]= 0;

  omp_set_num_threads(4);
  double begin = omp_get_wtime();

  /* BUG FIX: the loop indices used to be declared at function scope, which
   * made j and k SHARED across threads inside the parallel region (OpenMP
   * only privatizes the associated loop variable i).  That is a data race
   * and yields wrong results.  Declaring each index inside its own
   * for-statement makes it private to every thread automatically. */
  #pragma omp parallel for schedule(static,1)
  for (int i=0; i<NRA; i+=2)
  {
    for (int k=0; k<NCA; k+=2)
    {
      for (int j=0; j<NCB; j++)
      {
        c[i][j]   += a[i][k]     * b[k][j];
        c[i+1][j] += a[i+1][k]   * b[k][j];
        c[i][j]   += a[i][k+1]   * b[k+1][j];
        c[i+1][j] += a[i+1][k+1] * b[k+1][j];
      }
    }
  }

  double time_spent = (double)(omp_get_wtime() - begin);

  {
    /*** Print results ***/
    printf("******************************************************\n");
    printf("Result Matrix:\n");
    for (int i=0; i<NRA; i++)
    {
      for (int j=0; j<NCB; j++)
        printf("%6.2f ", c[i][j]);
      printf("\n");
    }
    printf("******************************************************\n");
    printf ("Done.\n");
    printf ("Time: %f\n", time_spent);
  }
}
phSpline.h
/*! * ***************************************************************************** * \file phSpline.h * \author moennen * \brief implementation of a polyharmonic spline interpolant * \date 2018-03-16 * *****************************************************************************/ #ifndef _UTILS_PHSPLINE_H #define _UTILS_PHSPLINE_H #include <Eigen/Dense> #include <vector> template <unsigned InDim = 2, unsigned OutDim = InDim, unsigned Order = 2, typename Real = float> class PhSpline final { const size_t _nCtrlPts; // Params of the spline : OutDim x ( _nCtrPts ( weights ) + InDim + 1 ( // affine ) ) std::vector<Real> _params; template <typename t, int dim = Eigen::Dynamic> using Vector = Eigen::Matrix<t, dim, 1>; using Matrix = Eigen::Matrix<Real, Eigen::Dynamic, Eigen::Dynamic>; public: PhSpline( const Real* ctrlPts, const Real* ctrlFunc, const size_t nCtrlPts ) : _nCtrlPts( nCtrlPts ), _params( OutDim * ( _nCtrlPts + InDim + 1 ) ) { // reject unsolvable system assert( _nCtrlPts > InDim ); fit( ctrlPts, ctrlFunc, _nCtrlPts, &_params[0] ); } // given a point x of InDim dimensions return the corresponding function value // y // in OutDim dimensions inline void operator()( const Real* ctrlPts, const Real* x, Real* y ) const { using namespace Eigen; const size_t nParams = _nCtrlPts + InDim + 1; const auto mapX = Map<const Vector<Real, InDim> >( x ); const auto mapCtrlPts = Map<const Matrix>( ctrlPts, InDim, _nCtrlPts ); Vector<Real> vecX( nParams ); for ( unsigned pt = 0; pt < _nCtrlPts; ++pt ) { vecX[pt] = psi( ( mapX - mapCtrlPts.col( pt ) ).norm() ); } vecX.template segment<InDim>( _nCtrlPts ) = mapX; vecX[nParams - 1] = 1.0; // compute the interpolated function value auto vecY = Map<Vector<Real, OutDim> >( y ); vecY = vecX.adjoint() * Map<const Matrix>( &_params[0], nParams, OutDim ); } inline size_t getNParams() const { return _nCtrlPts + InDim + 1; } inline const Real* getParams() const & { return &_params[0]; } private: static inline Real psi( Real r ) { return 
( Order % 2 ) ? std::pow( r, Order ) : r < 1.0f ? std::pow( r, Order - 1 ) * std::log( std::pow( r, r ) ) : std::pow( r, Order ) * std::log( r ); } // estimate the weight st // _w = argmin_w ( ||eval(ref,w,ctrl) - probe||^2 ) static inline void fit( const Real* ctrlPts, const Real* ctrlFuncs, const size_t nCtrlPts, Real* params ) { using namespace Eigen; // system matrix // --> const size_t nParams = nCtrlPts + InDim + 1; Matrix S( nParams, nParams ); //#pragma omp parallel for for ( size_t r = 0; r < nCtrlPts; ++r ) { auto rx = Map<const Vector<Real, InDim> >( &ctrlPts[r * InDim] ); for ( unsigned c = 0; c < nCtrlPts; ++c ) { auto cx = Map<const Vector<Real, InDim> >( &ctrlPts[c * InDim] ); S( r, c ) = psi( ( rx - cx ).norm() ); } } //#pragma omp parallel for for ( size_t c = 0; c < nCtrlPts; ++c ) { auto cx = Map<const Vector<Real, InDim> >( &ctrlPts[c * InDim] ); S.col( c ).template segment<InDim>( nCtrlPts ) = cx; S.row( c ).template segment<InDim>( nCtrlPts ) = cx; } S.template block<InDim + 1, InDim + 1>( nCtrlPts, nCtrlPts ).setZero(); S.col( nParams - 1 ).topRows( nCtrlPts ).setOnes(); S.row( nParams - 1 ).leftCols( nCtrlPts ).setOnes(); Matrix StS( nParams, nParams ); StS.template triangularView<Lower>() = S.transpose() * S; Vector<Real> f( nParams ); f.template bottomRows<InDim + 1>().setZero(); for ( size_t d = 0; d < OutDim; ++d ) { for ( size_t c = 0; c < nCtrlPts; ++c ) { f[c] = ctrlFuncs[c * OutDim + d]; } Matrix Stf = S.transpose() * f; StS.ldlt().solveInPlace( Stf ); Map<Vector<Real> >( &params[d * nParams], nParams ) = Stf; } } }; #endif // _UTILS_PHSPLINE_H
scan.c
/** * scan.c * Authors: Yizhao Gao <yizhaotsccsj@gmail.com> * Date: {08/01/2017} */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> void getCCCount(double * x, double * y, int * nCass, int * nCons, int locCount, double wSize, int wCount, int * casInW, int * conInW) { double distance; int minWindow; for(int i = 0; i < locCount * wCount; i++) { casInW[i] = 0; conInW[i] = 0; } #pragma omp parallel for private(distance, minWindow) for(int i = 0; i < locCount; i++) { for(int j = 0; j < locCount; j++) { distance = sqrt((x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j])); minWindow = (int)(ceil(distance / wSize)); if(minWindow > 0) minWindow --; for(int k = minWindow; k < wCount; k++) { casInW[i * wCount + k] += nCass[j]; conInW[i * wCount + k] += nCons[j]; } } } return; } void loglikelihood(double * ll, int * casInW, int * conInW, int totalWindow, int casCount, int conCount, int highLow) { double cas, con, tot; double llTemp; int totCount = casCount + conCount; bool highCluster = true; bool lowCluster = true; if(highLow == 1) lowCluster = false; else if(highLow == -1) highCluster = false; #pragma omp parallel for private(cas, con, tot, llTemp) for(int i = 0; i < totalWindow; i++) { cas = casInW[i]; con = conInW[i]; tot = cas + con; if(cas == -1) { ll[i] = 1; } else if(cas * conCount > con * casCount) { //High cluster of cases if(highCluster) { llTemp = cas * log(cas/tot); if(con > 0) llTemp += con * log(con/tot); if(casCount > cas) llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot)); if(conCount > con) llTemp += (conCount - con) * log((conCount - con)/(totCount - tot)); ll[i] = llTemp; } else ll[i] = 1; } else { //Low cluster of cases if(lowCluster) { llTemp = con * log(con/tot); if(cas > 0) llTemp += cas * log(cas/tot); if(casCount > cas) llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot)); if(conCount > con) llTemp += (conCount - con) * log((conCount - con)/(totCount - tot)); ll[i] = llTemp; } else 
ll[i] = 1; } } return; } void findTopNCluster(double * x, double * y, int locCount, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) { if(nClusters < 1) return; int aCenter = -1; int aRadius = -1; for(int i = 0; i < locCount; i++) { for(int j = 0; j < wCount; j++) { if(ll[i * wCount + j] < 0) { if(aCenter < 0) { aCenter = i; aRadius = j; } else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) { aCenter = i; aRadius = j; } } } } center[0] = aCenter; radius[0] = aRadius; cLL[0] = ll[aCenter * wCount + aRadius]; double lastX, lastY, lastRad; lastX = x[aCenter]; lastY = y[aCenter]; lastRad = (aRadius + 1) * wSize; double distance; int maxWindow; for(int c = 1; c < nClusters; c ++) { //Remove intersecting clusters for(int i = 0; i < locCount; i++) { distance = sqrt((x[i] - lastX) * (x[i] - lastX) + (y[i] - lastY) * (y[i] - lastY)) - lastRad; maxWindow = ceil(distance / wSize) - 1; if(maxWindow < 0) maxWindow = 0; for(int j = maxWindow; j < wCount; j++) ll[i * wCount + j] = 1; } //Find secoundary clusters aCenter = -1; aRadius = -1; for(int i = 0; i < locCount; i++) { for(int j = 0; j < wCount; j++) { if(ll[i * wCount + j] < 0) { if(aCenter < 0) { aCenter = i; aRadius = j; } else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) { aCenter = i; aRadius = j; } } } } center[c] = aCenter; radius[c] = aRadius; if(aCenter != -1) cLL[c] = ll[aCenter * wCount + aRadius]; else break; lastX = x[aCenter]; lastY = y[aCenter]; lastRad = (aRadius + 1) * wSize; } return; }
foxnn.h
//Copyright[2019][Gaganov Ilya]
//Licensed under the Apache License, Version 2.0
#pragma once
#include "layer.h"
#include "train_data.h"
#include "settings.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <random>
#include <ctime>
#include <string>
#include <cmath>
#include <omp.h>
#include <iterator>
#include <algorithm>
#include <set>
#include <numeric>
#include <ostream>
#include <memory>
#include <iomanip>
using namespace std;

// Fully-connected feed-forward neural network built from project-defined
// `layer` objects (see layer.h), trained by backpropagation with
// OpenMP-parallel batches.  `train_data`, `one_train_data` and `Settings`
// are project types; their internals are not visible here.
class neural_network
{
 public:
    neural_network(void) {}

    // Load a network (and, unless only_scale is set, its Settings) from a
    // file previously written by save().  The file starts with the layer
    // count, followed by one serialized layer per line group.
    neural_network(const string &name_file, const bool& only_scale = false)
    {
        ifstream file(name_file);
        if (only_scale == false)
            settings = Settings(file);
        size_t N_layers;
        file >> N_layers;
        layers.reserve(N_layers);
        for (size_t i = 0; i < N_layers; ++i)
            layers.push_back(make_shared <layer> (file, only_scale));
        file.close();
    }

    // Deep-copies the layers.
    // NOTE(review): `settings` is NOT copied here — the copy gets
    // default-constructed Settings.  Confirm whether that is intentional.
    neural_network(const neural_network &a)
    {
        for (size_t i = 0; i < a.layers.size(); i++)
            layers.push_back(make_shared<layer> (*(a.layers[i])));
    }

    //the basic method of creating a network
    // parameters[i] is the width of layer boundary i; each adjacent pair
    // becomes one sigmoid layer.  Invalid (non-positive) widths abort
    // construction after printing a message, leaving an empty network.
    neural_network(const vector<int> &parameters)
    {
        bool error = false;
        for (int i : parameters)
            if (i < 1)
            {
                cout << "the number of input and output features must be positive" << endl;
                error = true;
            }
        if (error == false)
            for (size_t i = 0; i < parameters.size() - 1; ++i)
                layers.push_back(make_shared <layer>(parameters[i], parameters[i + 1], get_activation_function("sigmoid")));
    }

    //adding a layer to the end
    void next_layer(const layer &new_layer)
    {
        layers.push_back(make_shared<layer>(new_layer));
        return;
    }

    //to give the value of the network from the input
    // Runs a forward pass through all layers and applies the configured
    // output post-processing (see correction_out).
    vector<double> get_out(const vector<double> &first_in) const
    {
        vector<double> enter;
        vector<double> out;
        layers[0]->get_out(first_in, out);
        for (size_t i = 1; i < layers.size(); ++i)
        {
            enter = move(out);
            layers[i]->get_out(enter, out, settings.correct_summation);
        }
        correction_out(out);
        return out;
    }

    // Convenience overload: forward pass on a training sample's input.
    vector<double> get_out(const one_train_data &first_in) const
    {
        return get_out(first_in.input);
    }

    // Load a training set from file, then train on it.
    void train_on_file(const string &name_file, const double &speed, const size_t &max_iteration, const size_t & size_train_batch = 1)
    {
        train_data test(name_file);
        train(test, speed, max_iteration, size_train_batch);
    }

    // Main training loop: splits off a test fraction (settings.part_for_test),
    // then runs max_iteration batch updates with learning rate `speed`,
    // printing progress and auto-saving as configured.
    void train(train_data &data_for_train, const double& speed, const size_t& max_iteration, const size_t& size_train_batch = 1)
    {
        double start_time;
        const size_t size_batch = get_batch_size(data_for_train.size(), size_train_batch);
        init_memory_for_train(size_batch);
        const size_t size_test = data_for_train.size() * settings.part_for_test;
        const train_data test = data_for_train.get_part_for_test(size_test);
        settings.settings_optimization.adam.step = 0;
        size_t n_data_for_only_train = data_for_train.size() * (1 - settings.part_for_test);
        // Fall back to the full set if the train split is empty or smaller
        // than one batch.
        if (n_data_for_only_train == 0 || n_data_for_only_train < size_batch)
            n_data_for_only_train = data_for_train.size();
        train_data data_for_only_train = data_for_train.get_first_n(n_data_for_only_train);
        for (size_t iteration = 1; iteration <= max_iteration; ++iteration)
        {
            start_train_progressbar(iteration, max_iteration, start_time);
            train_data batch(data_for_only_train.get_part(size_batch));
            train_nn(batch, speed);
            print_info_iteration(iteration, max_iteration, size_test, test, start_time);
            auto_save(iteration);
        }
        delete_memory_after_train();
    }

    // Serialize settings (unless only_scale), the layer count, and each layer.
    void save(const string &name_file, const bool &only_scale = false) const
    {
        ofstream file(name_file);
        if (only_scale == false)
            settings.save(file);
        file << layers.size() << endl;
        for (size_t i = 0; i < layers.size(); ++i)
            layers[i]->save(file, only_scale);
        file.close();
        return;
    }

    // Perturb all layer weights randomly (delegates to layer).
    void random_mutation(const double &speed)
    {
        #pragma omp parallel for num_threads(settings.n_threads)
        for (size_t i = 0; i < layers.size(); ++i)
            layers[i]->random_mutation(speed);
    }

    // Perturb all layer weights using the layer's "smart" scheme.
    void smart_mutation(const double &speed)
    {
        #pragma omp parallel for num_threads(settings.n_threads)
        for (size_t i = 0; i < layers.size(); ++i)
            layers[i]->smart_mutation(speed);
    }

    // Dump every layer and the settings to stdout.
    void print_info(void)
    {
        for (size_t i = 0; i < layers.size(); ++i)
            layers[i]->print(i);
        settings.print_settings();
    }

    // Evaluate on a test set: prints and returns the mean absolute error,
    // plus a count of samples whose every output is within settings.min_error.
    double testing(const train_data& test) const
    {
        size_t n_true;
        const double er = get_error_for_test(test, n_true);
        cout << "error = " << scientific << setprecision(15) << er << " n_true = " << n_true << "/" << test.size() << endl;
        return er;
    }

    layer& operator[] (const size_t &i)
    {
        return *(layers[i]);
    }

    layer& get_layer (const size_t& i) //for Python
    {
        return *(layers[i]);
    }

    Settings settings;

 private:
    // Release per-batch training buffers held by each layer.
    void delete_memory_after_train()
    {
        for (auto i : layers)
            i->delete_memory_after_train();
    }

    // Save every settings.auto_save_iteration iterations (0 disables).
    void auto_save(const size_t &iteration) const
    {
        if (settings.auto_save_iteration != 0)
            if (iteration % settings.auto_save_iteration == 0)
                save(settings.auto_save_name_file);
    }

    // Progress-bar tick plus periodic test evaluation every n_print iterations.
    void print_info_iteration(const size_t &iteration, const size_t &max_iteration, const size_t &size_test, const train_data &test, const double &start_time) const
    {
        train_progressbar(iteration, max_iteration, start_time);
        if (settings.n_print != 0 && size_test != 0)
            if (iteration == max_iteration || (iteration % settings.n_print == 0))
            {
                testing(test);
                cout << "iteration = " << iteration << endl << endl;
            }
    }

    // Start a new progress-bar segment at each n_print boundary and stamp
    // start_time (an out-parameter read later by train_progressbar).
    void start_train_progressbar(const size_t &i, const size_t &max_iteration, double &start_time) const
    {
        if (settings.n_print == 0)
            return;
        if ((i - 1) % settings.n_print == 0)
        {
            start_time = omp_get_wtime();
            cout << "train progressbar: ";
            const size_t i_last_print = i - 1;
            // The last segment may be shorter than n_print.
            const size_t n_iteration_between_print = (max_iteration >= i_last_print + settings.n_print) ? settings.n_print : max_iteration - i_last_print;
            start_progressbar(n_iteration_between_print);
        }
    }

    // Print the initial "0/N (  0%)" marker.
    void start_progressbar(const size_t& max_iteration) const
    {
        const size_t len_str_max_iteration = std::to_string(max_iteration).size();
        cout << setw(len_str_max_iteration) << 0 << "/" << max_iteration << " (" << setw(3) << 0 << "%)";
    }

    // Overwrite the previous marker in place with backspaces, then print
    // "now/N (xx%)".
    void progressbar(const size_t& now, const size_t& max_iteration) const
    {
        const size_t len_str_max_iteration = std::to_string(max_iteration).size();
        const size_t proc = (now * 100) / max_iteration;
        string deleter;
        for (int i = 0; i < 2 * len_str_max_iteration + 8; ++i)
            deleter.push_back('\b');
        cout << deleter;
        cout << setw(len_str_max_iteration) << now << "/" << max_iteration << " (" << setw(3) << proc << "%)";
    }

    // Advance the bar within the current n_print segment; at segment end,
    // print the elapsed time since start_train_progressbar stamped start_time.
    void train_progressbar(const size_t & i, const size_t & max_iteration, const double& start_time) const
    {
        if (settings.n_print == 0)
            return;
        const size_t i_after_print = i - ((i - 1) / settings.n_print) * settings.n_print;
        const size_t i_last_print = ((i - 1) / settings.n_print) * settings.n_print;
        const size_t n_iteration_between_print = (max_iteration < i_last_print + settings.n_print) ? max_iteration - i_last_print : settings.n_print;
        progressbar(i_after_print, n_iteration_between_print);
        if (i_after_print == n_iteration_between_print)
        {
            const double train_time = omp_get_wtime() - start_time;
            cout << " train_time = " << fixed << setprecision(3) << train_time << endl;
        }
    }

    // 0 or an oversized request means "use the whole set as one batch".
    // NOTE(review): the trailing `return 1;` is unreachable.
    size_t get_batch_size(const size_t &size_file_test, const size_t & size_train_batch) const
    {
        if (size_train_batch == 0 || size_train_batch >= size_file_test)
            return size_file_test;
        else
            return size_train_batch;
        return 1;
    }

    // Parallel evaluation: accumulates summed absolute error (reduction) and
    // counts fully-correct samples; the lock only serializes progress-bar
    // output, not the computation.
    double get_error_for_test(const train_data &test, size_t &n_true) const
    {
        const double start_test = omp_get_wtime();
        double error = 0.0;
        size_t n_true_answer = 0;
        cout << "test progressbar: ";
        start_progressbar(test.size());
        size_t iteration_done = 0;
        omp_lock_t lock;
        omp_init_lock(&lock);
        #pragma omp parallel for num_threads(settings.n_threads) reduction (+: error) reduction (+: n_true_answer) shared(test)
        for (size_t i = 0; i < test.size(); ++i)
        {
            size_t need_max = 0;
            const vector <double> out = get_out(test[i]->input);
            for (size_t j = 0; j < out.size(); ++j)
            {
                const double delta = fabs(out[j] - test[i]->out[j]);
                error += delta;
                if (delta < settings.min_error)
                    need_max++;
            }
            if (need_max == out.size())
                n_true_answer++;
            omp_set_lock(&lock);
            iteration_done++;
            progressbar(iteration_done, test.size());
            omp_unset_lock(&lock);
        }
        n_true = n_true_answer;
        omp_destroy_lock(&lock);
        const double time_test = omp_get_wtime() - start_test;
        cout << " time_test = " << fixed << setprecision(3) << time_test << endl;
        return error / test.size();
    }

    // error[i][j] = network output minus target for sample i, output j —
    // assumes forward_stroke has already filled layers.back()->output.
    void error_last_layer(const train_data &batch, vector <vector <double>> &error) const
    {
        error.resize(batch.size());
        for (size_t i = 0; i < batch.size(); ++i)
            error[i].resize(batch[i]->out.size());
        for (size_t i = 0; i < batch.size(); ++i)
            for (size_t j = 0; j < batch[i]->out.size(); ++j)
                error[i][j] = layers.back()->output[i][j] - batch[i]->out[j];
        return;
    }

    // Forward pass for a whole batch, one sample per thread; each sample j
    // writes only its own slot output[j] in every layer.
    void forward_stroke(const train_data &batch)
    {
        vector <shared_ptr<layer>>& layers2 = layers;
        #pragma omp parallel for num_threads(settings.n_threads) shared(batch, layers2)
        for (size_t j = 0; j < batch.size(); ++j)
        {
            layers2[0]->get_out(batch[j]->input, layers2[0]->output[j]);
            for (size_t i = 1; i < layers2.size(); ++i)
                layers2[i]->get_out(layers2[i - 1]->output[j], layers2[i]->output[j]);
            correction_out(layers2.back()->output[j]);
        }
    }

    // Optional output post-processing: either one-hot the argmax, or
    // threshold every output against settings.intermediate_value.
    void correction_out(vector<double> &out) const
    {
        if (settings.max_on_last_layer == 1)
        {
            const size_t max_n = distance(out.begin(), max_element(out.begin(), out.end()));
            fill(out.begin(), out.end(), 0.0);
            out[max_n] = 1;
            return;
        }
        if (settings.one_if_value_greater_intermediate_value == 1)
        {
            for_each(out.begin(), out.end(),
                [&](double& num)
                {
                    if (num >= settings.intermediate_value)
                    {
                        num = 1.0;
                    }
                    else
                    {
                        num = 0.0;
                    }
                }
            );
            return;
        }
    }

    // Ask every layer to allocate its per-batch training buffers.
    void init_memory_for_train(const size_t & size_batch)
    {
        for (size_t i = 0; i < layers.size(); ++i)
            layers[i]->init_memory_for_train(size_batch, settings);
    }

    // One training step: forward pass, per-sample backprop (parallel over
    // samples; layer::back_running presumably accumulates gradients in a
    // thread-safe way — confirm in layer.h), then parallel weight update
    // per layer and an Adam step counter bump.
    void train_nn(const train_data & batch, const double &speed)
    {
        forward_stroke(batch);
        vector <vector <double>> error;
        vector <shared_ptr<layer>>& layers2 = layers;
        error_last_layer(batch, error);
        #pragma omp parallel for num_threads(settings.n_threads) shared(error, batch, layers2)
        for (size_t i = 0; i < batch.size(); ++i)
        {
            for (size_t j = layers2.size() - 1; j >= 1; --j)
                layers2[j]->back_running(error[i], layers2[j - 1]->output[i], settings.correct_summation);
            layers2[0]->back_running(error[i], batch[i]->input, settings.correct_summation);
        }
        #pragma omp parallel for num_threads(settings.n_threads) shared(layers2)
        for (int i = 0; i < layers2.size(); ++i)
            layers2[i]->correction_of_scales(speed, settings);
        settings.settings_optimization.adam.next_step();
        return;
    }

    vector <shared_ptr<layer>> layers;
};
prepress.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS % % P P R R E P P R R E SS SS % % PPPP RRRR EEE PPPP RRRR EEE SSS SSS % % P R R E P R R E SS SS % % P R R EEEEE P R R EEEEE SSSSS SSSSS % % % % % % MagickCore Prepress Methods % % % % Software Design % % Cristy % % October 2001 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/prepress.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T o t a l I n k D e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageTotalInkDensity() returns the total ink density for a CMYK image. % Total Ink Density (TID) is determined by adding the CMYK values in the % darkest shadow area in an image. % % The format of the GetImageTotalInkDensity method is: % % double GetImageTotalInkDensity(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
 * Scan every pixel and return the largest R+G+B+K sum (the Total Ink
 * Density) of a CMYK image; returns 0.0 for non-CMYK images or on a cache
 * read failure.
 */
MagickExport double GetImageTotalInkDensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    total_ink_density;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density,
      row_maximum;

    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      BUG FIX: the shared maximum used to be read per-pixel outside the
      critical section while other threads updated it inside — a data race.
      Instead, find this row's maximum locally, then merge once per row
      under the named critical section.
    */
    row_maximum=0.0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+
        GetPixelBlue(image,p)+GetPixelBlack(image,p);
      if (density > row_maximum)
        row_maximum=density;
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
    {
      if (row_maximum > total_ink_density)
        total_ink_density=row_maximum;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
heap_mult.h
#include "CSC.h"
#include "utility.h"
#include <omp.h>
#include <algorithm>
#include <iostream>

using namespace std;

/**
 ** Count flop of SpGEMM between A and B in CSC format
 ** Also writes, per column i of C, an upper bound on its nonzero count into
 ** maxnnzc[i] (the sum of the sizes of the A columns it touches).
 ** Returns 2 * (number of scalar multiplications), i.e. multiply+add flops.
 **/
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B, IT *maxnnzc)
{
    long long int flop = 0; // total flop (multiplication) needed to generate C
#pragma omp parallel
    {
        long long int tflop = 0; //thread private flop
#pragma omp for
        for (IT i = 0; i < B.cols; ++i) { // for all columns of B
            long long int locmax = 0;
            for (IT j = B.colptr[i]; j < B.colptr[i + 1]; ++j) { // For all the nonzeros of the ith column
                IT inner = B.rowids[j]; // get the row id of B (or column id of A)
                IT npins = A.colptr[inner + 1] - A.colptr[inner]; // get the number of nonzeros in A's corresponding column
                locmax += npins;
            }
            maxnnzc[i] = locmax;
            tflop += locmax;
        }
#pragma omp critical
        {
            flop += tflop;
        }
    }
    return flop * 2;
}

/** Flop-count overload that discards the per-column bounds. */
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B)
{
    IT *dummy = my_malloc<IT>(B.cols);
    long long int flop = get_flop(A, B, dummy);
    my_free<IT>(dummy);
    return flop;
}

/**
 ** Heap-based SpGEMM: C = A * B (both CSC), using multop for scalar
 ** multiplication and addop for accumulation of duplicate row indices.
 ** Columns of C are partitioned across threads by (approximately) equal
 ** flop count; each thread merges the contributing A columns for each of
 ** its C columns with a binary heap, writing into a thread-private buffer
 ** that is finally compacted into C.
 **/
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HeapSpGEMM(const CSC<IT,NT> & A, const CSC<IT,NT> & B, CSC<IT,NT> & C, MultiplyOperation multop, AddOperation addop)
{
    // Every thread writes the same value, so the "race" is benign;
    // this is just a way to discover the parallel team size.
    int numThreads;
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }

    // *************** Load-balancing Thread Scheduling *********************
    IT *maxnnzc = my_malloc<IT>(B.cols);
    long long int flops = get_flop(A, B, maxnnzc) / 2;
    IT flopsPerThread = flops/numThreads; // amount of work that will be assigned to each thread
    IT *colPerThread = my_malloc<IT>(numThreads + 1); //thread i will process columns from colPerThread[i] to colPerThread[i+1]-1
    IT *colStart = my_malloc<IT>(B.cols); //start index in the global array for storing ith column of C
    IT *colEnd = my_malloc<IT>(B.cols); //end index in the global array for storing ith column of C
    colStart[0] = 0;
    colEnd[0] = 0;
    // NOTE(review): curThread/nextflops look like leftovers of a serial
    // scheduling loop that was replaced by the lower_bound version below.
    int curThread = 0;
    colPerThread[curThread++] = 0;
    IT nextflops = flopsPerThread;

    /* Parallelized version */
    // Exclusive prefix sum of the per-column bounds gives each column's
    // offset into the (conceptual) global output buffer.
    scan(maxnnzc, colStart, B.cols);
#pragma omp parallel for
    for (int i = 1; i < B.cols; ++i) {
        colEnd[i] = colStart[i];
    }
    // Each thread's column range ends where the cumulative flop count passes
    // its share (colStart is used here as a proxy for cumulative flops).
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        long end_itr = (lower_bound(colStart, colStart + B.cols, flopsPerThread * (tid + 1))) - colStart;
        colPerThread[tid + 1] = end_itr;
    }
    colPerThread[numThreads] = B.cols;

    // *************** Creating global space to store result, used by all threads *********************
    IT size = colEnd[B.cols-1] + maxnnzc[B.cols-1];
    IT **LocalRowIdsofC = my_malloc<IT*>(numThreads);
    NT **LocalValuesofC = my_malloc<NT*>(numThreads);
    // Each thread allocates exactly enough for the upper bounds of its own
    // column range.
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        IT localsum = 0;
        for (IT i = colPerThread[tid]; i < colPerThread[tid + 1]; ++i) {
            localsum += maxnnzc[i];
        }
        LocalRowIdsofC[tid] = my_malloc<IT>(localsum);
        LocalValuesofC[tid] = my_malloc<NT>(localsum);
    }
    my_free<IT>(maxnnzc);

    // *************** Creating LOCAL heap space to be used by all threads *********************
    // The heap never holds more entries than the densest B column in the
    // thread's range (one entry per contributing A column).
    IT *threadHeapSize = my_malloc<IT>(numThreads);
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        // IT localmax = -1; //incorrect
        IT localmax = 0;
        for (IT i = colPerThread[thisThread]; i < colPerThread[thisThread + 1]; ++i) {
            IT colnnz = B.colptr[i + 1] - B.colptr[i];
            if (colnnz > localmax)
                localmax = colnnz;
        }
        threadHeapSize[thisThread] = localmax;
    }

    // ************************ Numeric Phase *************************************
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        HeapEntry<IT, NT> *mergeheap = my_malloc<HeapEntry<IT, NT>>(threadHeapSize[thisThread]);
        for (IT i = colPerThread[thisThread]; i < colPerThread[thisThread + 1]; ++i) {
            IT k = 0; // Make initial heap
            for (IT j = B.colptr[i]; j < B.colptr[i + 1]; ++j) { // For all the nonzeros of the ith column
                IT inner = B.rowids[j]; // get the row id of B (or column id of A)
                IT npins = A.colptr[inner + 1] - A.colptr[inner]; // get the number of nonzeros in A's corresponding column
                if (npins > 0) {
                    mergeheap[k].loc = 1;
                    mergeheap[k].runr = j; // the pointer to B.rowid's is the run-rank
                    mergeheap[k].value = multop(A.values[A.colptr[inner]], B.values[j]);
                    mergeheap[k++].key = A.rowids[A.colptr[inner]]; // A's first rowid is the first key
                }
            }
            IT hsize = k; // if any of A's "significant" columns is empty, k will be less than hsize
            make_heap(mergeheap, mergeheap + hsize);
            // Repeatedly pop the smallest row index; equal consecutive keys
            // are merged with addop, new keys are appended to column i's
            // slice of the thread-local buffer.
            while(hsize > 0) {
                pop_heap(mergeheap, mergeheap + hsize); // result is stored in mergeheap[hsize-1]
                HeapEntry<IT,NT> hentry = mergeheap[hsize - 1];
                // Use short circuiting
                if ((colEnd[i] > colStart[i]) && LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1] == hentry.key) {
                    LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1] = addop(hentry.value, LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1]);
                }
                else {
                    LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]]= hentry.value;
                    LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]]= hentry.key;
                    colEnd[i] ++;
                }
                IT inner = B.rowids[hentry.runr];
                // If still unused nonzeros exists in A(:,colind), insert the next nonzero to the heap
                if ((A.colptr[inner + 1] - A.colptr[inner]) > hentry.loc) {
                    IT index = A.colptr[inner] + hentry.loc;
                    mergeheap[hsize-1].loc = hentry.loc + 1;
                    mergeheap[hsize-1].runr = hentry.runr;
                    mergeheap[hsize-1].value = multop(A.values[index], B.values[hentry.runr]);
                    mergeheap[hsize-1].key = A.rowids[index];
                    push_heap(mergeheap, mergeheap + hsize);
                }
                else {
                    --hsize;
                }
            }
        }
        my_free<HeapEntry<IT, NT>>(mergeheap);
    }
    my_free<IT>(threadHeapSize);

    // NOTE(review): calling make_empty() only when C is already empty looks
    // inverted — confirm the intent against CSC::isEmpty/make_empty.
    if (C.isEmpty()) {
        C.make_empty();
    }

    // ************************ Copy output to C *************************************
    C.rows = A.rows;
    C.cols = B.cols;
    C.colptr = my_malloc<IT>(C.cols + 1);
    C.colptr[0] = 0;
    IT *col_nz = my_malloc<IT>(C.cols);
#pragma omp parallel for
    for (int i = 0; i < C.cols; ++i) {
        col_nz[i] = colEnd[i] - colStart[i];
    }
    // Prefix-sum the actual per-column counts into the final colptr.
    scan(col_nz, C.colptr, C.cols + 1);
    my_free<IT>(col_nz);
    C.nnz = C.colptr[C.cols];
    C.rowids = my_malloc<IT>(C.nnz);
    C.values = my_malloc<NT>(C.nnz);
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        for(int i = colPerThread[thisThread]; i< colPerThread[thisThread + 1]; ++i) {
            // combine step
            copy(&LocalRowIdsofC[thisThread][colStart[i] - colStart[colPerThread[thisThread]]], &LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]], C.rowids + C.colptr[i]);
            copy(&LocalValuesofC[thisThread][colStart[i] - colStart[colPerThread[thisThread]]], &LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]], C.values + C.colptr[i]);
        }
    }

    // ************************ Memory deallocation *************************************
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        my_free<IT>(LocalRowIdsofC[thisThread]);
        my_free<NT>(LocalValuesofC[thisThread]);
    }
    my_free<IT*>(LocalRowIdsofC);
    my_free<NT*>(LocalValuesofC);
    my_free<IT>(colPerThread);
    my_free<IT>(colEnd);
    my_free<IT>(colStart);
}
bfs_threaded.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #if USE_MPI #include <mpi.h> #endif #ifdef _OPENMP #include <omp.h> #endif #include "graph.h" #if PRED_CACHE_BYPASS #include <emmintrin.h> #include <xmmintrin.h> #endif int run_bfs_2Dgraph_threaded(dist_graph_t* g, uint64_t root, uint64_t* pred, uint64_t *pred_array_size_ptr, uint64_t* nvisited) { uint64_t next_vert_count; uint64_t current_vert_count; uint64_t global_queue_vert_count; uint32_t *queue_current, *queue_next, *tmp_queue_ptr; uint64_t vis_level; uint64_t pred_count; pred_count = 0; #pragma omp parallel { uint64_t pred_vert_pair_count; const uint32_t* restrict adj; const uint32_t* restrict num_edges; uint64_t n_local, n_local_col, n_local_row; uint8_t* d; uint64_t pred_local[LOCAL_QUEUE_SIZE] __attribute__ ((aligned (16))); #if USE_MPI int32_t* restrict recvbuf_displs; uint32_t *adj_sendbuf, *adj_recvbuf; #endif int32_t *sendbuf_counts, *recvbuf_counts; uint32_t queue_next_local[LOCAL_QUEUE_SIZE] __attribute__ ((aligned (16))); #if USE_MPI uint32_t adj_sendbuf_local[LOCAL_SENDBUF_SIZE*MAX_NUMPROCS] __attribute__ ((aligned (16))); uint32_t adj_sendbuf_counts_local[MAX_NUMPROCS] __attribute__ ((aligned (16))); assert(g->nproc_rows <= MAX_NUMPROCS); #endif int32_t *adj_sendbuf_counts, *adj_sendbuf_displs; int32_t *adj_recvbuf_counts, *adj_recvbuf_displs; int irow, jcol, rank_rep; int nproc_rows, nproc_cols; uint64_t bitmask63; int tid; #if TIME_BFS_SUBROUTINES double elt, full_elt, init_elt, mpi_elt, sift_elt, sift_elt_part, load_elt, trans_elt, trans_elt_part, load_elt_part, mpi_elt_part; elt = init_elt = mpi_elt = load_elt = sift_elt = full_elt = load_elt_part = sift_elt_part = trans_elt = trans_elt_part = mpi_elt_part = 0.0; #endif #if __x86_64__ bitmask63 = (1UL<<63)-1; #else bitmask63 = (1ULL<<63)-1; #endif #ifdef _OPENMP tid = omp_get_thread_num(); #else tid = 0; #endif #pragma omp barrier #if TIME_BFS_SUBROUTINES if ((rank == 0) && (tid == 0)) full_elt = elt = 
get_seconds(); #endif pred_vert_pair_count = 0; n_local = g->n_local; n_local_col = g->n_local_col; n_local_row = g->n_local_row; nproc_rows = g->nproc_rows; nproc_cols = g->nproc_cols; d = g->d; sendbuf_counts = (g->comm_data).sendbuf_counts; recvbuf_counts = (g->comm_data).recvbuf_counts; //sendbuf_displs = (g->comm_data).sendbuf_displs; recvbuf_displs = (g->comm_data).recvbuf_displs; adj_sendbuf = (g->comm_data).adj_sendbuf; adj_recvbuf = (g->comm_data).adj_recvbuf; adj_sendbuf_counts = (g->comm_data).adj_sendbuf_counts; adj_sendbuf_displs = (g->comm_data).adj_sendbuf_displs; adj_recvbuf_counts = (g->comm_data).adj_recvbuf_counts; adj_recvbuf_displs = (g->comm_data).adj_recvbuf_displs; adj = g->adj; num_edges = g->num_edges; irow = (g->comm_data).irow; jcol = (g->comm_data).jcol; rank_rep = (rank % (nproc_rows * nproc_cols)); int num_a2a_iterations = 0; if (tid == 0) { queue_current = g->queue_current; queue_next = g->queue_next; *nvisited = 0; for (int i=0; i<nprocs; i++) { sendbuf_counts[i] = 0; recvbuf_counts[i] = 0; } } #if USE_MPI for (int i=0; i<nproc_rows; i++) { adj_sendbuf_counts_local[i] = 0; } #endif int next_vert_count_local = 0; #pragma omp barrier #if REPLICATE_D #pragma omp for for (int64_t i=0; i<n_local_col; i++) { d[i] = 0; } #else #pragma omp for for (int64_t i=0; i<n_local; i++) { d[i] = 0; } #endif uint32_t src_owner_proc_row = root/n_local_row; if (tid == 0) { if (src_owner_proc_row == irow) { queue_current[0] = root; current_vert_count = 1; } else { current_vert_count = 0; } } uint32_t src_owner_proc = root/n_local; if (tid == 0) { if (src_owner_proc == rank_rep) { #if STORE_PRED pred_local[pred_vert_pair_count] = root % n_local_col; pred_local[pred_vert_pair_count+1] = bitmask63; pred_vert_pair_count = 2; #endif } } #if REPLICATE_D uint32_t src_owner_proc_col = root/n_local_col; if (tid == 0) { if (src_owner_proc_col == jcol) { uint64_t local_src_val = root % n_local_col; d[local_src_val] = 1; } } #else if (tid == 0) { if (src_owner_proc 
== rank_rep) { uint64_t local_src_val = root % n_local; d[local_src_val] = 1; fprintf(stderr, "local src val %lu\n", local_src_val); } } #endif if (tid == 0) { next_vert_count = 0; global_queue_vert_count = 1; vis_level = 2; } #pragma omp barrier #if TIME_BFS_SUBROUTINES if ((rank == 0) && (tid == 0)) { elt = get_seconds() - elt; init_elt = elt; } #endif while (global_queue_vert_count > 0) { if (tid == 0) { for (int i=0; i<nproc_rows; i++) { adj_sendbuf_counts[i] = 0; adj_recvbuf_counts[i] = 0; } } #if TIME_BFS_SUBROUTINES if ((rank == 0) && (tid == 0)) { load_elt_part = get_seconds(); } #endif #pragma omp barrier #pragma omp for schedule(guided) nowait for (uint64_t i=0; i<current_vert_count; i++) { uint64_t u = (queue_current[i] % n_local_row); uint64_t num_edges_u_start = num_edges[u]; uint64_t num_edges_u_end = num_edges[u+1]; for (uint64_t j=num_edges_u_start; j<num_edges_u_end; j++) { uint64_t v = adj[j]; uint64_t v_owner_proc_row = v/n_local; #if SAMEPROC_NOREAD #if USE_MPI if (v_owner_proc_row == irow) { #endif uint64_t u_global = n_local_row*irow + u; #if REPLICATE_D uint32_t d_val = d[v]; if (d_val == 0) { d[v] = vis_level; queue_next_local[next_vert_count_local++] = v + jcol*n_local_col; if (next_vert_count_local == LOCAL_QUEUE_SIZE) { uint64_t queue_next_offset = __sync_fetch_and_add(&next_vert_count, LOCAL_QUEUE_SIZE); next_vert_count_local = 0; memcpy(queue_next+queue_next_offset, queue_next_local, LOCAL_QUEUE_SIZE * 4); } #if STORE_PRED #if PRED_CACHE_BYPASS __m128i pv = _mm_set_epi64(*(__m64 *) &u_global, *(__m64 *) &v); _mm_stream_si128((__m128i *) &pred_local[pred_vert_pair_count], pv); #else pred_local[pred_vert_pair_count] = v; pred_local[pred_vert_pair_count+1] = u_global; #endif pred_vert_pair_count += 2; if (pred_vert_pair_count == LOCAL_QUEUE_SIZE) { uint64_t pred_next_offset = __sync_fetch_and_add(&pred_count, LOCAL_QUEUE_SIZE); pred_vert_pair_count = 0; memcpy(pred + pred_next_offset, pred_local, LOCAL_QUEUE_SIZE * 8); } #endif } #else 
uint64_t v_local = v % n_local; uint32_t d_val = d[v_local]; if (d_val == 0) { d[v_local] = vis_level; queue_next_local[next_vert_count_local++] = v + jcol*n_local_col; if (next_vert_count_local == LOCAL_QUEUE_SIZE) { uint64_t queue_next_offset = __sync_fetch_and_add(&next_vert_count, LOCAL_QUEUE_SIZE); next_vert_count_local = 0; memcpy(queue_next+queue_next_offset, queue_next_local, LOCAL_QUEUE_SIZE * 4); } #if STORE_PRED #if PRED_CACHE_BYPASS __m128i pv = _mm_set_epi64(*(__m64 *) &u_global, *(__m64 *) &v); _mm_stream_si128((__m128i *) &pred_local[pred_vert_pair_count], pv); #else pred_local[pred_vert_pair_count] = v; pred_local[pred_vert_pair_count+1] = u_global; #endif pred_vert_pair_count += 2; if (pred_vert_pair_count == LOCAL_QUEUE_SIZE) { uint64_t pred_next_offset = __sync_fetch_and_add(&pred_count, LOCAL_QUEUE_SIZE); pred_vert_pair_count = 0; memcpy(pred + pred_next_offset, pred_local, LOCAL_QUEUE_SIZE * 8); } #endif } #endif #endif #if USE_MPI #if SAMEPROC_NOREAD } else { #endif #if REPLICATE_D uint32_t d_val = d[v]; if (d_val == 0) { d[v] = vis_level; uint32_t local_pos = v_owner_proc_row*LOCAL_SENDBUF_SIZE + adj_sendbuf_counts_local[v_owner_proc_row]; adj_sendbuf_local[local_pos] = v; adj_sendbuf_local[local_pos+1] = u; adj_sendbuf_counts_local[v_owner_proc_row] += 2; if (adj_sendbuf_counts_local[v_owner_proc_row] == LOCAL_SENDBUF_SIZE) { int64_t sendbuf_counts_offset = __sync_fetch_and_add( adj_sendbuf_counts+v_owner_proc_row, LOCAL_SENDBUF_SIZE); uint32_t adj_sendbuf_offset = adj_sendbuf_displs[v_owner_proc_row] + sendbuf_counts_offset; memcpy(adj_sendbuf+adj_sendbuf_offset, adj_sendbuf_local+v_owner_proc_row*LOCAL_SENDBUF_SIZE, LOCAL_SENDBUF_SIZE * 4); adj_sendbuf_counts_local[v_owner_proc_row] = 0; } // int32_t pos = adj_sendbuf_displs[v_owner_proc_row] + // adj_sendbuf_counts[v_owner_proc_row]; //adj_sendbuf_counts[v_owner_proc_row] += 2; //adj_sendbuf[pos] = v; //adj_sendbuf[pos+1] = u; } #else uint32_t local_pos = 
v_owner_proc_row*LOCAL_SENDBUF_SIZE + adj_sendbuf_counts_local[v_owner_proc_row]; adj_sendbuf_local[local_pos] = v; adj_sendbuf_local[local_pos+1] = u; adj_sendbuf_counts_local[v_owner_proc_row] += 2; if (adj_sendbuf_counts_local[v_owner_proc_row] == LOCAL_SENDBUF_SIZE) { int64_t sendbuf_counts_offset = __sync_fetch_and_add( adj_sendbuf_counts+v_owner_proc_row, LOCAL_SENDBUF_SIZE); uint32_t adj_sendbuf_offset = adj_sendbuf_displs[v_owner_proc_row] + sendbuf_counts_offset; memcpy(adj_sendbuf+adj_sendbuf_offset, adj_sendbuf_local+v_owner_proc_row*LOCAL_SENDBUF_SIZE, LOCAL_SENDBUF_SIZE * 4); adj_sendbuf_counts_local[v_owner_proc_row] = 0; } #endif } #endif #if SAMEPROC_NOREAD } #endif } #pragma omp barrier #pragma omp critical { #if USE_MPI for (int i=0; i<nproc_rows; i++) { if (adj_sendbuf_counts_local[i] > 0) { uint32_t adj_sendbuf_offset = adj_sendbuf_displs[i] + adj_sendbuf_counts[i]; //fprintf(stderr, "rank %d, count %d, init %d, offset %d\n", i, // adj_sendbuf_counts_local[i], sendbuf_counts[i], adj_sendbuf_offset); memcpy(adj_sendbuf+adj_sendbuf_offset, adj_sendbuf_local+i*LOCAL_SENDBUF_SIZE, adj_sendbuf_counts_local[i] * 4); adj_sendbuf_counts[i] += adj_sendbuf_counts_local[i]; adj_sendbuf_counts_local[i] = 0; } } #endif } #pragma omp barrier #if TIME_BFS_SUBROUTINES if (tid == 0) { if (rank == 0) { load_elt += get_seconds() - load_elt_part; mpi_elt_part = get_seconds(); } } #endif #if USE_MPI if (tid == 0) { MPI_Alltoall(adj_sendbuf_counts, 1, MPI_UNSIGNED, adj_recvbuf_counts, 1, MPI_UNSIGNED, (g->comm_data).col_comm); MPI_Alltoallv(adj_sendbuf, adj_sendbuf_counts, adj_sendbuf_displs, MPI_UNSIGNED, adj_recvbuf, adj_recvbuf_counts, adj_recvbuf_displs, MPI_UNSIGNED, (g->comm_data).col_comm); #if 0 long adj_cut_count = 0; for (int i=0; i<nproc_rows; i++) { adj_cut_count += adj_sendbuf_counts[i]; } MPI_Barrier(MPI_COMM_WORLD); long adj_cut_count_max = 0; long adj_cut_count_sum = 0; MPI_Allreduce(&adj_cut_count, &adj_cut_count_max, 1, MPI_LONG, MPI_MAX, 
MPI_COMM_WORLD); MPI_Allreduce(&adj_cut_count, &adj_cut_count_sum, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD); adj_cut_count_total += adj_cut_count; if ((rank == 0) && (adj_cut_count_max > g->m_local/10)) fprintf(stderr, "alltoallv, level %d, max %ld, total %ld, imbalance %3.4lf\n", vis_level, adj_cut_count_max, adj_cut_count_sum, ((double) adj_cut_count_max)*nprocs/adj_cut_count_sum); // MPI_Barrier(MPI_COMM_WORLD); #endif } #endif #if TIME_BFS_SUBROUTINES #if 0 uint64_t datavol_sendsize = 0; uint64_t datavol_recvsize = 0; for (i=0; i<nproc_rows; i++) { datavol_sendsize += adj_sendbuf_counts[i]; datavol_recvsize += adj_recvbuf_counts[i]; } MPI_Barrier((g->comm_data).col_comm); #endif if (tid == 0) MPI_Barrier((g->comm_data).col_comm); if ((rank == 0) && (tid == 0)) { mpi_elt_part = get_seconds() - mpi_elt_part; /* if (mpi_elt_part > 0.1) { fprintf(stderr, "level %lu, datavol %lu %lu, time %9.6lf s\n", vis_level, datavol_sendsize, datavol_recvsize, mpi_elt_part); } */ mpi_elt += mpi_elt_part; sift_elt_part = get_seconds(); } #endif #pragma omp barrier #if USE_MPI #pragma omp for for (uint64_t i=0; i<nproc_rows; i++) { uint64_t recvbuf_start = adj_recvbuf_displs[i]; uint64_t recvbuf_end = recvbuf_start + adj_recvbuf_counts[i]; for (uint64_t j=recvbuf_start; j<recvbuf_end; j+= 2) { uint64_t v = adj_recvbuf[j]; uint64_t u_pred = adj_recvbuf[j+1]; uint64_t u_global = i*n_local_row+u_pred; #if REPLICATE_D uint32_t d_val = d[v]; if (d_val == 0) { d[v] = vis_level; // queue_next[next_vert_count++] = v + jcol*n_local_col; queue_next_local[next_vert_count_local++] = v + jcol*n_local_col; if (next_vert_count_local == LOCAL_QUEUE_SIZE) { uint64_t queue_next_offset = __sync_fetch_and_add(&next_vert_count, LOCAL_QUEUE_SIZE); next_vert_count_local = 0; memcpy(queue_next+queue_next_offset, queue_next_local, LOCAL_QUEUE_SIZE * 4); } #if STORE_PRED #if PRED_CACHE_BYPASS __m128i pv = _mm_set_epi64(*(__m64 *) &u_global, *(__m64 *) &v); _mm_stream_si128((__m128i *) 
&pred_local[pred_vert_pair_count], pv); #else pred_local[pred_vert_pair_count] = v; pred_local[pred_vert_pair_count+1] = u_global; #endif pred_vert_pair_count += 2; if (pred_vert_pair_count == LOCAL_QUEUE_SIZE) { uint64_t pred_next_offset = __sync_fetch_and_add(&pred_count, LOCAL_QUEUE_SIZE); pred_vert_pair_count = 0; memcpy(pred + pred_next_offset, pred_local, LOCAL_QUEUE_SIZE * 8); } #endif } #else uint64_t v_local = v % n_local; uint32_t d_val = d[v_local]; if (d_val == 0) { d[v_local] = vis_level; queue_next_local[next_vert_count_local++] = v + jcol*n_local_col; if (next_vert_count_local == LOCAL_QUEUE_SIZE) { uint64_t queue_next_offset = __sync_fetch_and_add(&next_vert_count, LOCAL_QUEUE_SIZE); next_vert_count_local = 0; memcpy(queue_next+queue_next_offset, queue_next_local, LOCAL_QUEUE_SIZE * 4); } // queue_next[next_vert_count++] = v+jcol*n_local_col; #if STORE_PRED #if PRED_CACHE_BYPASS __m128i pv = _mm_set_epi64(*(__m64 *) &u_global, *(__m64 *) &v); _mm_stream_si128((__m128i *) &pred_local[pred_vert_pair_count], pv); #else pred_local[pred_vert_pair_count] = v; pred_local[pred_vert_pair_count+1] = u_global; #endif pred_vert_pair_count += 2; if (pred_vert_pair_count == LOCAL_QUEUE_SIZE) { uint64_t pred_next_offset = __sync_fetch_and_add(&pred_count, LOCAL_QUEUE_SIZE); pred_vert_pair_count = 0; memcpy(pred + pred_next_offset, pred_local, LOCAL_QUEUE_SIZE * 8); } #endif } #endif } } #endif #pragma omp barrier /* Critical section to write out remaining verts */ #pragma omp critical { if (next_vert_count_local > 0) { memcpy(queue_next+next_vert_count, queue_next_local, next_vert_count_local * 4); next_vert_count += next_vert_count_local; next_vert_count_local = 0; } if (pred_vert_pair_count > 0) { uint64_t pred_next_offset = __sync_fetch_and_add(&pred_count, pred_vert_pair_count); memcpy(pred + pred_next_offset, pred_local, pred_vert_pair_count * 8); pred_vert_pair_count = 0; } } #pragma omp barrier if (tid == 0) { #if TIME_BFS_SUBROUTINES 
MPI_Barrier((g->comm_data).col_comm); if (rank == 0) { sift_elt += get_seconds() - sift_elt_part; trans_elt_part = get_seconds(); } #endif /* Transpose queue_next */ uint64_t next_vert_count_trans = 0; #if USE_MPI MPI_Status status1, status2; #endif int recv_proc = ((irow*nproc_cols+jcol)/nproc_rows) + nproc_cols * ((irow*nproc_cols+jcol)%nproc_rows); assert(recv_proc < nproc_rows * nproc_cols); #if USE_MPI MPI_Sendrecv(&next_vert_count, 1, MPI_UNSIGNED_LONG, jcol*nproc_rows+irow, rank % (nproc_cols*nproc_rows), &next_vert_count_trans, 1, MPI_UNSIGNED_LONG, recv_proc, recv_proc, (g->comm_data).replicas_comm, &status1); MPI_Sendrecv(queue_next, next_vert_count, MPI_UNSIGNED, jcol*nproc_rows+irow, rank % (nproc_cols*nproc_rows), queue_current, next_vert_count_trans, MPI_UNSIGNED, recv_proc, recv_proc, (g->comm_data).replicas_comm, &status2); #else next_vert_count_trans = next_vert_count; memcpy(queue_current, queue_next, sizeof(uint32_t)*next_vert_count_trans); #endif /* Allgather */ sendbuf_counts[0] = next_vert_count_trans; #if USE_MPI MPI_Allgather(sendbuf_counts, 1, MPI_INT, recvbuf_counts, 1, MPI_INT, (g->comm_data).row_comm); #else recvbuf_counts[0] = sendbuf_counts[0]; #endif recvbuf_displs[0] = 0; for (int i=1; i<nproc_cols; i++) { recvbuf_displs[i] = recvbuf_displs[i-1] + recvbuf_counts[i-1]; } current_vert_count = recvbuf_displs[nproc_cols-1] + recvbuf_counts[nproc_cols-1]; assert(current_vert_count < n_local_row); #if USE_MPI MPI_Allgatherv(queue_current, next_vert_count_trans, MPI_UNSIGNED, queue_next, recvbuf_counts, recvbuf_displs, MPI_UNSIGNED, (g->comm_data).row_comm); #if 0 uint64_t current_vert_count_total = 0; uint64_t current_vert_count_max = 0; MPI_Barrier(MPI_COMM_WORLD); MPI_Allreduce(&current_vert_count, &current_vert_count_max, 1, MPI_UNSIGNED_LONG, MPI_MAX, (g->comm_data).col_comm); MPI_Allreduce(&current_vert_count, &current_vert_count_total, 1, MPI_UNSIGNED_LONG, MPI_SUM, (g->comm_data).col_comm); if ((rank == 0) && 
(current_vert_count_total > n_local/4)) fprintf(stderr, "gather, level %d, max %ld, total %ld, imbalance %3.4lf\n", vis_level, current_vert_count_max, current_vert_count_total, ((double) current_vert_count_max)*nproc_cols/current_vert_count_total); #endif #else memcpy(queue_next, queue_current, next_vert_count_trans*sizeof(uint32_t)); #endif tmp_queue_ptr = queue_current; queue_current = queue_next; queue_next = tmp_queue_ptr; #if TIME_BFS_SUBROUTINES MPI_Barrier((g->comm_data).row_comm); if (rank == 0) { trans_elt += get_seconds() - trans_elt_part; } #endif *nvisited += current_vert_count; //fprintf(stderr, "rank %d, current_vert_count %lu\n", // rank, current_vert_count); //uint64_t tmp_vert_count = current_vert_count; //current_vert_count = next_vert_count; next_vert_count = 0; #if USE_MPI MPI_Allreduce(&current_vert_count, &global_queue_vert_count, 1, MPI_UNSIGNED_LONG, MPI_SUM, (g->comm_data).col_comm); #else global_queue_vert_count = current_vert_count; #endif vis_level++; num_a2a_iterations++; /* if (rank == 0) fprintf(stderr, "vis level %lu, global vert count %lu\n", vis_level, global_queue_vert_count); */ /* for (i=0; i<nprocs; i++) { sendbuf_counts[i] = 0; recvbuf_counts[i] = 0; } */ assert(vis_level < 255); } #pragma omp barrier } #if USE_MPI if (tid == 0) { MPI_Allreduce(MPI_IN_PLACE, nvisited, 1, MPI_UNSIGNED_LONG, MPI_SUM, (g->comm_data).col_comm); MPI_Barrier((g->comm_data).replicas_comm); } #endif #if TIME_BFS_SUBROUTINES if ((rank == 0) && (tid == 0)) { full_elt = get_seconds() - full_elt; fprintf(stderr, "time: %9.6lf s.\n" "sift %9.6lf s. (%3.3lf), load %9.6lf s. (%3.3lf)\n" "a2a %9.6lf s. (%3.3lf), gather %9.6lf s. (%3.3lf)\n" "comm %9.6lf s. (%3.3lf), comp %9.6lf s. 
(%3.3lf)\n" "init %9.6lf s., vis levels %lu, nvisited %lu\n", full_elt, sift_elt, (sift_elt/full_elt)*100.0, load_elt, (load_elt/full_elt)*100.0, mpi_elt, (mpi_elt/full_elt)*100.0, trans_elt, (trans_elt/full_elt)*100.0, mpi_elt+trans_elt, ((mpi_elt+trans_elt)/full_elt)*100.0, sift_elt+load_elt, ((sift_elt+load_elt)/full_elt)*100.0, init_elt, vis_level, *nvisited); } #endif } //fprintf(stderr, "rank %d, total adj cut %ld\n", rank, adj_cut_count_total); //*pred_array_size_ptr = pred_vert_pair_count/2; *pred_array_size_ptr = pred_count/2; // fprintf(stderr, "rank %d, pred count: %lu\n", rank, pred_count/2); return 0; }
LBMClass.h
#ifndef LIDDRIVENCAVITYLBM_LBMCLASS_H #define LIDDRIVENCAVITYLBM_LBMCLASS_H #include <iostream> #include <vector> #include <math.h> #include <omp.h> #include "Eigen/Dense" using namespace std; extern const double Re; extern const int N; extern const int M; extern const double Utop,Vtop; extern const double Ubot,Vbot; extern const double Ulef,Vlef; extern const double Urig,Vrig; extern const int SAVETXT; extern const double THRESH; extern const int THREADS; extern const int MBounds; extern const int MCollide; extern const int PRECOND; extern const double GAMMA; extern const int INCOMP; extern const int BC; extern const int Q; extern const double MAXITER; extern const double NU; extern const double TAU; extern const double MAGIC; extern const double RHO0,U0,V0; extern const int N2; extern const int NQ; extern const double w[9]; extern const int c[9][2]; extern const int opp[9]; extern const int half[4]; extern const int prntInt; extern const int Nm1,Mm1; extern const int Nm2,Mm2; extern const int GM[9][9]; extern const double GMinv[9][9]; extern double start,stop; extern const int IBN,IB; extern const double IBcenter[2],IBradius; class LBMClass{ public: LBMClass(): _f1(NQ,0.0), _f2(NQ,0.0), _fstar(NQ, 0.0),_u1(N2, U0) ,_u2(N2, V0), _rho(N2, RHO0),_p(N2, 0.0), _x(N, 0.0),_y(M, 0.0), _error(100, 0.0),_vmag(N2,0.0), _stress(N2, 0.0), _vort(N2, 0.0),_dudx(N2, 0.0),_dudy(N2, 0.0),_dvdx(N2, 0.0),_dvdy(N2, 0.0),_forceX(N2, 0.0),_forceY(N2, 0.0), _df(), _df0(),_MACHSTAR(0.0), _TAU_P(0.0), _OMEGA(0.0), _OMEGAm(0.0), _CS(0.0), _MACH(0.0), _rhobar(1.0), _filename1(),_filename2(),_omega_e(),_omega_eps(),_omega_q(),_omega_nu(), _GS{}, _Umax(), _IBrx(IBN,0.0), _IBry(IBN,0.0), _IBur(IBN,0.0), _IBvr(IBN,0.0), _IBFx(IBN,0.0), _IBFy(IBN,0.0), _IBub(IBN,0.0), _IBvb(IBN,0.0), _IBmatrixAx(IBN,IBN), _IBmatrixAy(IBN,IBN), _IBmatrixAxInv(IBN,IBN), _IBmatrixAyInv(IBN,IBN), _fBodyX(),_fBodyY(),_IBds(),_IBrxmx(),_IBrxmn(),_IBrymx(),_IBrymn() { // Initialize x if (MBounds == 0){ 
linspace(_x,(0.5 / N), (N - 0.5) / (double) N,N); linspace(_y,(0.5 / M), (M - 0.5) / (double) N,M); } else{ linspace(_x,0.0,1.0 ,N); linspace(_y,0.0,(double) M / (double) N ,M); } _CS = 1.0 / sqrt(3.0); _MACH = Utop / _CS; _MACHSTAR = _MACH / sqrt(GAMMA); _TAU_P = 0.5 + (TAU - 0.5) / GAMMA; _OMEGA = 1.0 / _TAU_P; _OMEGAm = 1.0 / (0.5 + ((MAGIC) / ((1.0 / (_OMEGA)) - 0.5))); _Umax = max(max(Utop,Ubot),max(Vlef,Vrig)); _omega_e = 1.1; // Bulk viscosity _omega_eps = 1.1; // free parameter _omega_q = 1.1; // free parameter _omega_nu = _OMEGA; // Shear viscosity _GS[0] = 0.0; _GS[1] = _omega_e; _GS[2] = _omega_eps; _GS[3] = 0.0; _GS[4] = _omega_q; _GS[5] = 0.0; _GS[6] = _omega_q; _GS[7] = _omega_nu; _GS[8] = _omega_nu; //_GS = {0.0, _omega_e, _omega_eps, 0.0, _omega_q, 0.0, _omega_q, _omega_nu, _omega_nu}; // SRT //const double _GS[Q] = {0.0, _OMEGA, _OMEGA, 0.0, _OMEGA, 0.0, _OMEGA, _OMEGA, _OMEGA}; // Mohamad Textbook //const double _GS[Q] = {0.0, 1.4, 1.4, 0.0, 1.2, 0.0, 1.2, _OMEGA, _OMEGA}; // High Re from Zhen-Hua et al. 
//const double _GS[Q] = {0.0, 1.1, 1.0, 0.0, 1.2, 0.0, 1.2, _OMEGA, _OMEGA}; // _GS[0] = 0.0; // _GS[1] = 1.1; // _GS[2] = 1.0; // _GS[3] = 0.0; // _GS[4] = 1.2; // _GS[5] = 0.0; // _GS[6] = 1.2; // _GS[7] = _omega_nu; // _GS[8] = _omega_nu; sprintf(_filename1,"Solution_n=%d_m=%d_Re=%.0f_BCM=%d_CM=%d_G=%0.2f_U=%0.2f.dat",N,M,Re,MBounds,MCollide, GAMMA, Utop); sprintf(_filename2,"Error_n=%d_m=%d_Re=%.0f_BCM=%d_CM=%d_G=%0.2f_U=%0.2f.txt",N,M,Re,MBounds,MCollide, GAMMA,Utop); // Initialize f int ind1,ind2; #pragma omp parallel for private(ind1,ind2) collapse(2) for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { ind1 = LU2(i, j); for (int k = 0; k < Q; k++) { ind2 = LU3(i, j, k); _f1[ind2] = calcfeq(k,ind1); _f2[ind2] = _f1[ind2]; } } } if (BC == 1){ _fBodyX = 8.0 * NU * _Umax / (M*M); } if (IB==1) { //***** Immersed Boundary Initialization ***** // initialize position of points on boundary for (int i = 0; i < IBN; i++){ _IBrx[i] = IBcenter[0] + IBradius * cos((i * 360.0 / (double) IBN) * (_pi/180.0)); _IBry[i] = IBcenter[1] + IBradius * sin((i * 360.0 / (double) IBN) * (_pi/180.0)); } // Find IB region for (int i = 0; i < IBN; i++){ if (i == 0){ _IBrxmx = _IBrx[0]; _IBrxmn = _IBrx[0]; _IBrymx = _IBry[0]; _IBrymn = _IBry[0]; } else { if (_IBrx[i] < _IBrxmn) _IBrxmn = _IBrx[i]; if (_IBrx[i] > _IBrxmx) _IBrxmx = _IBrx[i]; if (_IBry[i] < _IBrymn) _IBrymn = _IBry[i]; if (_IBry[i] > _IBrymx) _IBrymx = _IBry[i]; } } // Find loop bounds _IBrxmx = min(ceil(_IBrxmx + 2), (double) Nm1); _IBrymx = min(ceil(_IBrymx + 2), (double) Mm1); _IBrxmn = max(floor(_IBrxmn - 2), 0.0); _IBrymn = max(floor(_IBrymn - 2), 0.0); //_IBrxmx = Nm1; //_IBrymx = Mm1; //_IBrxmn = 0.0; //_IBrymn = 0.0; _IBds = 2.0 * _pi * IBradius / (double) IBN; printf("Points of Immersed Boundary:\n"); for (int i = 0; i < IBN; i++) printf("(%.3f, %.3f)\n", _IBrx[i],_IBry[i]); printf("X IB (%d,%d)\n",(int)_IBrxmn,(int)_IBrxmx); printf("Y IB (%d,%d)\n",(int)_IBrymn,(int)_IBrymx); // Velocity of boundary for 
(int i = 0; i < IBN; i++){ _IBub[i] = 0.0; _IBub[i] = 0.0; } // Calculate matrix A _IBmatrixAx = Eigen::MatrixXd::Zero(IBN,IBN); _IBmatrixAy = Eigen::MatrixXd::Zero(IBN,IBN); double Dirac1,Dirac2; #pragma omp parallel for collapse(2) private(Dirac1,Dirac2) for (int i = 0; i < IBN; i++){ for (int j = 0; j < IBN; j++){ for (int k = (int)_IBrxmn; k <= (int)_IBrxmx; k++){ for (int l = (int)_IBrymn; l <= (int)_IBrymx; l++){ Dirac1 = diracdelta(_IBrx[i] - k) * diracdelta(_IBry[i] - l); Dirac2 = diracdelta(_IBrx[j] - k) * diracdelta(_IBry[j] - l); _IBmatrixAx(i,j) += _IBds * Dirac1 * Dirac2; _IBmatrixAy(i,j) += _IBds * Dirac1 * Dirac2; } } } } // Calculate inverse of matrix A _IBmatrixAxInv = _IBmatrixAx.inverse(); _IBmatrixAyInv = _IBmatrixAy.inverse(); } // Print parameters printf("Re =\t%.0f\n", Re); printf("U =\t%.3e\n", Utop); printf("M =\t%.3e\n", Utop * sqrt(3)); printf("N =\t%d\n", N); printf("M =\t%d\n", M); printf("tau =\t%.3e\n", _TAU_P); printf("nu =\t%.3e\n", NU); printf("Gamma =\t%.3e\n", GAMMA); if (PRECOND == 1){ printf("_MACH* =\t%.3e\n", _MACHSTAR); } } // Collide Methods inline void collideSRT() { int ind1{}, ind2{}; double Fsource{},fTotalx{},fTotaly{}; #pragma omp parallel for private(ind1, ind2,Fsource,fTotalx,fTotaly) collapse(2) for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { ind1 = LU2(i, j); fTotalx = _forceX[ind1] + _fBodyX; fTotaly = _forceY[ind1] + _fBodyY; for (int k = 0; k < Q; k++) { ind2 = LU3(i, j, k); _fstar[ind2] = (1.0 - _OMEGA) * _f1[ind2] + _OMEGA * calcfeq(k, ind1); Fsource = (1.0 - 0.5 * _OMEGA) * w[k] * (3.0 * (c[k][0] - _u1[ind1]) + 9.0 * (c[k][0] * _u1[ind1] + c[k][1] * _u2[ind1]) * c[k][0]) * fTotalx + (3.0 * (c[k][1] - _u2[ind1]) + 9.0 * (c[k][0] * _u1[ind1] + c[k][1] * _u2[ind1]) * c[k][1]) * fTotaly; _fstar[ind2] += Fsource; } } } if (BC == 0 || BC == 1) virtualnode(); } inline void collideTRT(){ double fplus{},fminus{},feqplus{},feqminus{},feq[9]{}; int ind1{},l{},notl{}; #pragma omp parallel for 
private(fplus,fminus,feqplus,feqminus,feq,l,notl,ind1) collapse(2) for (int i = 0; i < N; i++){ for (int j = 0; j < M; j++){ ind1 = LU2(i, j); for (int k = 0; k < Q; k++) feq[k] = calcfeq(k,ind1); // Rest population fplus = _f1[LU3(i,j,0)]; feqplus = feq[0]; _fstar[LU3(i, j, 0)] = _f1[LU3(i, j, 0)] - _OMEGA * (fplus - feqplus); for (int k = 0; k < 4; k++) { l = half[k]; notl = opp[l]; fplus = 0.5 * (_f1[LU3(i,j,l)] + _f1[LU3(i,j,notl)]); fminus = 0.5 * (_f1[LU3(i,j,l)] - _f1[LU3(i,j,notl)]); feqplus = 0.5 * (feq[l] + feq[notl]); feqminus = 0.5 * (feq[l] - feq[notl]); fplus = _OMEGA * (fplus - feqplus); fminus = _OMEGAm * (fminus - feqminus); _fstar[LU3(i, j, l)] = _f1[LU3(i, j, l)] - fplus - fminus; _fstar[LU3(i, j, notl)] = _f1[LU3(i, j, notl)] - fplus + fminus; } } } if (BC == 0 || BC == 1) virtualnode(); } inline void collideMRT(){ #pragma omp parallel { int ind1; vector<double> _meq(Q,0.0),_mstar(Q,0.0); // moments double _m{},fTotalx,fTotaly; // moments #pragma omp for collapse(2) for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { ind1 = LU2(i,j); fTotalx = _forceX[ind1] + _fBodyX; fTotaly = _forceY[ind1] + _fBodyY; calcmeq(_meq, _u1[ind1], _u2[ind1], _rho[ind1],_p[ind1]); for (int k = 0; k < Q; k++){ _m = GM[k][0] * _f1[LU3(i,j,0)] + GM[k][1] * _f1[LU3(i,j,1)] + GM[k][2] * _f1[LU3(i,j,2)] +GM[k][3] * _f1[LU3(i,j,3)] + GM[k][4] * _f1[LU3(i,j,4)] + GM[k][5] * _f1[LU3(i,j,5)] + GM[k][6] * _f1[LU3(i,j,6)] + GM[k][7] * _f1[LU3(i,j,7)] + GM[k][8] * _f1[LU3(i,j,8)]; _mstar[k] = _m - _GS[k] * (_m - _meq[k]); } // Forces _mstar[1] += (1.0 - 0.5 * _GS[1]) * (6.0 * (fTotalx * _u1[ind1] + fTotaly * _u2[ind1])); _mstar[2] += (1.0 - 0.5 * _GS[2]) * (-6.0 * (fTotalx * _u1[ind1] + fTotaly * _u2[ind1])); _mstar[3] += (1.0 - 0.5 * _GS[3]) * (fTotalx); _mstar[4] += (1.0 - 0.5 * _GS[4]) * (-fTotalx); _mstar[5] += (1.0 - 0.5 * _GS[5]) * (fTotaly); _mstar[6] += (1.0 - 0.5 * _GS[6]) * (-fTotaly); _mstar[7] += (1.0 - 0.5 * _GS[7]) * (2.0 * (fTotalx * _u1[ind1] - fTotaly * 
_u2[ind1])); _mstar[8] += (1.0 - 0.5 * _GS[8]) * (fTotaly * _u1[ind1] + fTotalx * _u2[ind1]); for (int k = 0; k < Q; k++){ _fstar[LU3(i, j, k)] = GMinv[k][0] * _mstar[0] + GMinv[k][1] * _mstar[1] + GMinv[k][2] * _mstar[2] + GMinv[k][3] * _mstar[3] + GMinv[k][4] * _mstar[4] + GMinv[k][5] * _mstar[5] + GMinv[k][6] * _mstar[6] + GMinv[k][7] * _mstar[7] + GMinv[k][8] * _mstar[8]; } } } } if (BC == 0 || BC == 1) virtualnode(); } // Stream Methods inline void streamPush() { #pragma omp parallel { int inew, jnew; #pragma omp for collapse(3) for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { for (int k = 0; k < Q; k++) { inew = i + c[k][0]; jnew = j + c[k][1]; if (inew < N && inew >= 0 && jnew < M && jnew >= 0) _f2[LU3(inew, jnew, k)] = _fstar[LU3(i, j, k)]; } } } } } inline void streamPull() { #pragma omp parallel { int iold, jold; #pragma omp for collapse(3) for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { for (int k = 0; k < Q; k++) { iold = i - c[k][0]; jold = j - c[k][1]; if (iold < N && iold >= 0 && jold < M && jold >= 0) _f2[LU3(i, j, k)] = _fstar[LU3(iold, jold, k)]; } } } } } // Bounday Condition Methods // Wind tunnel simulation inline void uniformFlow(){ double rho{1.0}, sixth=1.0/6.0,twothirds=2.0/3.0,twelfth=1.0/12.0,Ucorner{}; // Outflow, 2nd order polynomial extrapolation double uout{}; for (int i = 1; i < Mm1 ;i++) { uout = -1.0 + _f2[LU3(Nm1, i, 0)] + _f2[LU3(Nm1, i, 2)] + _f2[LU3(Nm1, i, 4)] + 2.0 * (_f2[LU3(Nm1, i, 1)] + _f2[LU3(Nm1, i, 5)] + _f2[LU3(Nm1, i, 8)]); _f2[LU3(Nm1, i, 3)] = _f2[LU3(Nm1, i, 1)] - twothirds * uout; _f2[LU3(Nm1, i, 7)] = _f2[LU3(Nm1, i, 5)] + 0.5 * (_f2[LU3(Nm1, i, 2)]-_f2[LU3(Nm1, i, 4)]) - sixth * uout; _f2[LU3(Nm1, i, 6)] = _f2[LU3(Nm1, i, 8)] - 0.5 * (_f2[LU3(Nm1, i, 2)]-_f2[LU3(Nm1, i, 4)]) - sixth * uout; } // Inflow, Dirichlet BC for (int i = 0; i < M ;i++) { // rho = _f2[LU3(0, i, 0)] + _f2[LU3(0, i, 2)] + _f2[LU3(0, i, 4)] + 2.0 * (_f2[LU3(0, i, 3)] + _f2[LU3(0, i, 6)] + _f2[LU3(0, i, 7)]); 
_f2[LU3(0, i, 1)] = _f2[LU3(0, i, 3)] + twothirds * rho * Ulef; _f2[LU3(0, i, 5)] = _f2[LU3(0, i, 7)] + sixth * rho * Ulef; _f2[LU3(0, i, 8)] = _f2[LU3(0, i, 6)] + sixth * rho * Ulef; } // half-way Specular reflection on top and bottom for (int i = 0; i < N; i++) { // Top _f2[LU3(i, Mm1, 4)] = _f2[LU3(i, Mm1, 2)]; _f2[LU3(i, Mm1, 7)] = _f2[LU3(i, Mm1, 6)]; _f2[LU3(i, Mm1, 8)] = _f2[LU3(i, Mm1, 5)]; // Bottom _f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)]; _f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 8)]; _f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 7)]; } // Top Left (0,Nm1) knowns: 1,4,8, unknowns: 0, 5, 7 // rho = _rho[LU2(0,Mm1)]; // Ucorner = Ulef; // _f2[LU3(0, Mm1, 1)] = _f2[LU3(0, Mm1, 3)] + twothirds * rho * Ucorner; // _f2[LU3(0, Mm1, 4)] = _f2[LU3(0, Mm1, 2)]; // _f2[LU3(0, Mm1, 8)] = _f2[LU3(0, Mm1, 6)] + sixth * rho * (Ucorner); // _f2[LU3(0, Mm1, 5)] = twelfth * rho * (Ucorner); // _f2[LU3(0, Mm1, 7)] = -_f2[LU3(0, Mm1, 5)]; // _f2[LU3(0, Mm1, 0)] = rho - (_f2[LU3(0, Mm1, 1)] + _f2[LU3(0, Mm1, 2)] + _f2[LU3(0, Mm1, 3)] + _f2[LU3(0, Mm1, 4)] + _f2[LU3(0, Mm1, 5)] + _f2[LU3(0, Mm1, 6)] + _f2[LU3(0, Mm1, 7)] + _f2[LU3(0, Mm1, 8)]); // rho = _rho[LU2(0,0)]; // Ucorner = Ulef; // _f2[LU3(0, 0, 1)] = _f2[LU3(0, 0, 3)] + twothirds * rho * Ucorner; // _f2[LU3(0, 0, 2)] = _f2[LU3(0, 0, 4)]; // _f2[LU3(0, 0, 5)] = _f2[LU3(0, 0, 7)] + sixth * rho * (Ucorner); // _f2[LU3(0, 0, 6)] = twelfth * rho * (Ucorner); // _f2[LU3(0, 0, 8)] = -_f2[LU3(0, 0, 6)]; // _f2[LU3(0, 0, 0)] = rho - (_f2[LU3(0, 0, 1)] + _f2[LU3(0, 0, 2)] + _f2[LU3(0, 0, 3)] + _f2[LU3(0, 0, 4)] + _f2[LU3(0, 0, 5)] + _f2[LU3(0, 0, 6)] + _f2[LU3(0, 0, 7)] + _f2[LU3(0, 0, 8)]); } inline void NEBB(){ switch (BC){ case 2 : { // Lid-driven cavity flow const double sixth = 1.0 / 6.0, twothirds = 2.0 / 3.0, twelfth = 1.0 / 12.0; double rho{_rhobar},Ucorner{},Vcorner{}; #pragma omp parallel for private(rho) for (int i = 1; i < N - 1; i++) { // Top wall, general case rho = _f2[LU3(i, Mm1, 0)] + _f2[LU3(i, Mm1, 1)] + _f2[LU3(i, Mm1, 3)] + 2.0 
* (_f2[LU3(i, Mm1, 2)] + _f2[LU3(i, Mm1, 6)] + _f2[LU3(i, Mm1, 5)]); _f2[LU3(i, Mm1, 4)] = _f2[LU3(i, Mm1, 2)] - twothirds * rho * Vtop; _f2[LU3(i, Mm1, 7)] = _f2[LU3(i, Mm1, 5)] + 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]) - 0.5 * rho * Utop - sixth * rho * Vtop; _f2[LU3(i, Mm1, 8)] = _f2[LU3(i, Mm1, 6)] - 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]) + 0.5 * rho * Utop - sixth * rho * Vtop; // Bottom wall, general case rho = _f2[LU3(i, 0, 0)] + _f2[LU3(i, 0, 1)] + _f2[LU3(i, 0, 3)] + 2.0 * (_f2[LU3(i, 0, 4)] + _f2[LU3(i, 0, 7)] + _f2[LU3(i, 0, 8)]); _f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)] + twothirds * rho * Vbot; _f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) + 0.5 * rho * Ubot + sixth * rho * Vbot; _f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) - 0.5 * rho * Ubot + sixth * rho * Vbot; } #pragma omp parallel for private(rho) for (int i = 1; i < Mm1; i++) { // Left wall, general case rho = _f2[LU3(0, i, 0)] + _f2[LU3(0, i, 2)] + _f2[LU3(0, i, 4)] + 2.0 * (_f2[LU3(0, i, 3)] + _f2[LU3(0, i, 6)] + _f2[LU3(0, i, 7)]); _f2[LU3(0, i, 1)] = _f2[LU3(0, i, 3)] + twothirds * rho * Ulef; _f2[LU3(0, i, 5)] = _f2[LU3(0, i, 7)] - 0.5 * (_f2[LU3(0, i, 2)] - _f2[LU3(0, i, 4)]) + 0.5 * rho * Vlef + sixth * rho * Ulef; _f2[LU3(0, i, 8)] = _f2[LU3(0, i, 6)] + 0.5 * (_f2[LU3(0, i, 2)] - _f2[LU3(0, i, 4)]) - 0.5 * rho * Vlef + sixth * rho * Ulef; // Right wall, general case rho = _f2[LU3(Nm1, i, 0)] + _f2[LU3(Nm1, i, 2)] + _f2[LU3(Nm1, i, 4)] + 2.0 * (_f2[LU3(Nm1, i, 1)] + _f2[LU3(Nm1, i, 5)] + _f2[LU3(Nm1, i, 8)]); _f2[LU3(Nm1, i, 3)] = _f2[LU3(Nm1, i, 1)] - twothirds * rho * Urig; _f2[LU3(Nm1, i, 7)] = _f2[LU3(Nm1, i, 5)] + 0.5 * (_f2[LU3(Nm1, i, 2)] - _f2[LU3(Nm1, i, 4)]) - 0.5 * rho * Vrig - sixth * rho * Urig; _f2[LU3(Nm1, i, 6)] = _f2[LU3(Nm1, i, 8)] - 0.5 * (_f2[LU3(Nm1, i, 2)] - _f2[LU3(Nm1, i, 4)]) + 0.5 * rho * Vrig - sixth * rho * Urig; } // Corners rho = _rhobar; // Bottom Left (0,0) 
knowns: 1,5,2, unknowns: 0,6,8 rho = _rho[LU2(0,0)]; Vcorner = max(Vbot, Vlef); Ucorner = max(Ubot, Ulef); _f2[LU3(0, 0, 1)] = _f2[LU3(0, 0, 3)] + twothirds * rho * Ucorner; _f2[LU3(0, 0, 2)] = _f2[LU3(0, 0, 4)] + twothirds * rho * Vcorner; _f2[LU3(0, 0, 5)] = _f2[LU3(0, 0, 7)] + sixth * rho * (Ucorner + Vcorner); _f2[LU3(0, 0, 6)] = twelfth * rho * (Vcorner - Ucorner); _f2[LU3(0, 0, 8)] = -_f2[LU3(0, 0, 6)]; _f2[LU3(0, 0, 0)] = rho - (_f2[LU3(0, 0, 1)] + _f2[LU3(0, 0, 2)] + _f2[LU3(0, 0, 3)] + _f2[LU3(0, 0, 4)] + _f2[LU3(0, 0, 5)] + _f2[LU3(0, 0, 6)] + _f2[LU3(0, 0, 7)] + _f2[LU3(0, 0, 8)]); // Bottom Right (Nm1,0) knowns: 2,3,6, unknowns: 0, 5, 7 rho = _rho[LU2(Nm1,0)]; Vcorner = max(Vbot, Vrig); Ucorner = max(Ubot, Urig); _f2[LU3(Nm1, 0, 2)] = _f2[LU3(Nm1, 0, 4)] + twothirds * rho * Vcorner; _f2[LU3(Nm1, 0, 3)] = _f2[LU3(Nm1, 0, 1)] - twothirds * rho * Ucorner; _f2[LU3(Nm1, 0, 6)] = _f2[LU3(Nm1, 0, 8)] + sixth * rho * (-Ucorner + Vcorner); _f2[LU3(Nm1, 0, 5)] = twelfth * rho * (Vcorner + Ucorner); _f2[LU3(Nm1, 0, 7)] = -_f2[LU3(Nm1, 0, 5)]; _f2[LU3(Nm1, 0, 0)] = rho - (_f2[LU3(Nm1, 0, 1)] + _f2[LU3(Nm1, 0, 2)] + _f2[LU3(Nm1, 0, 3)] + _f2[LU3(Nm1, 0, 4)] + _f2[LU3(Nm1, 0, 5)] + _f2[LU3(Nm1, 0, 6)] + _f2[LU3(Nm1, 0, 7)] + _f2[LU3(Nm1, 0, 8)]); // Top Left (0,Nm1) knowns: 1,4,8, unknowns: 0, 5, 7 rho = _rho[LU2(0,Mm1)]; Vcorner = max(Vtop, Vlef); Ucorner = max(Utop, Ulef); _f2[LU3(0, Mm1, 1)] = _f2[LU3(0, Mm1, 3)] + twothirds * rho * Ucorner; _f2[LU3(0, Mm1, 4)] = _f2[LU3(0, Mm1, 2)] - twothirds * rho * Vcorner; _f2[LU3(0, Mm1, 8)] = _f2[LU3(0, Mm1, 6)] + sixth * rho * (Ucorner - Vcorner); _f2[LU3(0, Mm1, 5)] = twelfth * rho * (Vcorner + Ucorner); _f2[LU3(0, Mm1, 7)] = -_f2[LU3(0, Mm1, 5)]; _f2[LU3(0, Mm1, 0)] = rho - (_f2[LU3(0, Mm1, 1)] + _f2[LU3(0, Mm1, 2)] + _f2[LU3(0, Mm1, 3)] + _f2[LU3(0, Mm1, 4)] + _f2[LU3(0, Mm1, 5)] + _f2[LU3(0, Mm1, 6)] + _f2[LU3(0, Mm1, 7)] + _f2[LU3(0, Mm1, 8)]); // Top Right (Nm1,Nm1) knowns: 3,7,4, unknowns: 0, 6, 8 rho = 
_rho[LU2(Nm1,Mm1)]; Vcorner = max(Vtop, Vrig); Ucorner = max(Utop, Urig); _f2[LU3(Nm1, Mm1, 4)] = _f2[LU3(Nm1, Mm1, 2)] - twothirds * rho * Vcorner; _f2[LU3(Nm1, Mm1, 3)] = _f2[LU3(Nm1, Mm1, 1)] - twothirds * rho * Ucorner; _f2[LU3(Nm1, Mm1, 7)] = _f2[LU3(Nm1, Mm1, 5)] - sixth * rho * (Ucorner + Vcorner); _f2[LU3(Nm1, Mm1, 6)] = twelfth * rho * (Vcorner - Ucorner); _f2[LU3(Nm1, Mm1, 8)] = -_f2[LU3(Nm1, Mm1, 6)]; _f2[LU3(Nm1, Mm1, 0)] = rho - (_f2[LU3(Nm1, Mm1, 1)] + _f2[LU3(Nm1, Mm1, 2)] + _f2[LU3(Nm1, Mm1, 3)] + _f2[LU3(Nm1, Mm1, 4)] + _f2[LU3(Nm1, Mm1, 5)] + _f2[LU3(Nm1, Mm1, 6)] + _f2[LU3(Nm1, Mm1, 7)] + _f2[LU3(Nm1, Mm1, 8)]); break; } case 1: { // Poiseuille Flow #pragma omp parallel for for (int i = 0; i < N; i++) { // Top wall, general case _f2[LU3(i, Mm1, 4)] = _f2[LU3(i, Mm1, 2)]; _f2[LU3(i, Mm1, 7)] = _f2[LU3(i, Mm1, 5)] + 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]); _f2[LU3(i, Mm1, 8)] = _f2[LU3(i, Mm1, 6)] - 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]); // Bottom wall, general case _f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)]; _f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]); _f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]); } break; } case 0: { // Couette Flow double rho{}; #pragma omp parallel for private(rho) for (int i = 0; i < N; i++) { // Top wall, general case rho = _f2[LU3(i, Mm1, 0)] + _f2[LU3(i, Mm1, 1)] + _f2[LU3(i, Mm1, 3)] + 2.0 * (_f2[LU3(i, Mm1, 2)] + _f2[LU3(i, Mm1, 6)] + _f2[LU3(i, Mm1, 5)]); _f2[LU3(i, Mm1, 4)] = _f2[LU3(i, Mm1, 2)]; _f2[LU3(i, Mm1, 7)] = _f2[LU3(i, Mm1, 5)] + 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]) - 0.5 * rho * Utop; _f2[LU3(i, Mm1, 8)] = _f2[LU3(i, Mm1, 6)] - 0.5 * (_f2[LU3(i, Mm1, 1)] - _f2[LU3(i, Mm1, 3)]) + 0.5 * rho * Utop; // Bottom wall, general case rho = _f2[LU3(i, 0, 0)] + _f2[LU3(i, 0, 1)] + _f2[LU3(i, 0, 3)] + 2.0 * (_f2[LU3(i, 0, 4)] + _f2[LU3(i, 0, 7)] + _f2[LU3(i, 0, 8)]); _f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)]; 
_f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) + 0.5 * rho * Ubot; _f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) - 0.5 * rho * Ubot; } break; } default: { std::printf("Error: Invalid Boundary condition case number\n"); exit(1); } } } // Non-equilibrium extrapolation inline void NEE() { switch (BC){ case 2: { // Lid-driven cavity flow #pragma omp parallel { double rhof{},u1f{},u2f{}; double rhowall{_rhobar}; #pragma omp for for (int i = 1; i < N-1; i++){ // Bottom wall, (i, 0) rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]); rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)]; u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])); u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } for (int k = 0; k < Q; k++) _f2[LU3(i,0,k)] = calcfeq(k,Ubot,Vbot,rhowall) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof)); // Top Wall, (i,Nm1) rhowall = _f2[LU3(i,Mm1,0)] +_f2[LU3(i,Mm1,1)] +_f2[LU3(i,Mm1,3)] + 2.0 * (_f2[LU3(i,Mm1,2)] +_f2[LU3(i,Mm1,6)] +_f2[LU3(i,Mm1,5)]); rhof = _f2[LU3(i,Mm2,0)] + _f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,5)]+ _f2[LU3(i,Mm2,6)]+ _f2[LU3(i,Mm2,7)]+ _f2[LU3(i,Mm2,8)]; u1f = ((_f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,8)]) - (_f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,6)] + _f2[LU3(i,Mm2,7)])); u2f = ((_f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,6)]) - (_f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,7)] + _f2[LU3(i,Mm2,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } for (int k = 0; k < Q; k++) _f2[LU3(i,Mm1,k)] = calcfeq(k,Utop,Vtop,rhowall) + (_f2[LU3(i,Mm2,k)] - 
calcfeq(k, u1f, u2f, rhof)); } #pragma omp for for (int i = 1; i < Mm1; i++){ // Left wall, (0,i) rhowall = _f2[LU3(0,i,0)] +_f2[LU3(0,i,2)] +_f2[LU3(0,i,4)] + 2.0 * (_f2[LU3(0,i,3)] +_f2[LU3(0,i,6)] +_f2[LU3(0,i,7)]); rhof = _f2[LU3(1,i,0)] + _f2[LU3(1,i,1)] + _f2[LU3(1,i,2)] + _f2[LU3(1,i,3)] + _f2[LU3(1,i,4)] + _f2[LU3(1,i,5)]+ _f2[LU3(1,i,6)]+ _f2[LU3(1,i,7)]+ _f2[LU3(1,i,8)]; u1f = ((_f2[LU3(1,i,1)] + _f2[LU3(1,i,5)] + _f2[LU3(1,i,8)]) - (_f2[LU3(1,i,3)] + _f2[LU3(1,i,6)] + _f2[LU3(1,i,7)])); u2f = ((_f2[LU3(1,i,2)] + _f2[LU3(1,i,5)] + _f2[LU3(1,i,6)]) - (_f2[LU3(1,i,4)] + _f2[LU3(1,i,7)] + _f2[LU3(1,i,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } for (int k = 0; k < Q; k++) _f2[LU3(0,i,k)] = calcfeq(k,Ulef,Vlef,rhowall) + (_f2[LU3(1,i,k)] - calcfeq(k, u1f, u2f, rhof)); // Right Wall (Nm1, i) rhowall = _f2[LU3(Nm1,i,0)] +_f2[LU3(Nm1,i,2)] +_f2[LU3(Nm1,i,4)] + 2.0 * (_f2[LU3(Nm1,i,1)] +_f2[LU3(Nm1,i,5)] +_f2[LU3(Nm1,i,8)]); rhof = _f2[LU3(Nm2,i,0)] + _f2[LU3(Nm2,i,1)] + _f2[LU3(Nm2,i,2)] + _f2[LU3(Nm2,i,3)] + _f2[LU3(Nm2,i,4)] + _f2[LU3(Nm2,i,5)]+ _f2[LU3(Nm2,i,6)]+ _f2[LU3(Nm2,i,7)]+ _f2[LU3(Nm2,i,8)]; u1f = ((_f2[LU3(Nm2,i,1)] + _f2[LU3(Nm2,i,5)] + _f2[LU3(Nm2,i,8)]) - (_f2[LU3(Nm2,i,3)] + _f2[LU3(Nm2,i,6)] + _f2[LU3(Nm2,i,7)])); u2f = ((_f2[LU3(Nm2,i,2)] + _f2[LU3(Nm2,i,5)] + _f2[LU3(Nm2,i,6)]) - (_f2[LU3(Nm2,i,4)] + _f2[LU3(Nm2,i,7)] + _f2[LU3(Nm2,i,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } for (int k = 0; k < Q; k++) _f2[LU3(Nm1,i,k)] = calcfeq(k,Urig,Vrig,rhowall) + (_f2[LU3(Nm2,i,k)] - calcfeq(k, u1f, u2f, rhof)); } // Corners // Bottom left (0,0) rhof = _f2[LU3(1,1,0)] + _f2[LU3(1,1,1)] + _f2[LU3(1,1,2)] + _f2[LU3(1,1,3)] + _f2[LU3(1,1,4)] + _f2[LU3(1,1,5)]+ _f2[LU3(1,1,6)]+ _f2[LU3(1,1,7)]+ _f2[LU3(1,1,8)]; u1f = ((_f2[LU3(1,1,1)] + _f2[LU3(1,1,5)] + _f2[LU3(1,1,8)]) - (_f2[LU3(1,1,3)] + _f2[LU3(1,1,6)] + _f2[LU3(1,1,7)])); u2f = ((_f2[LU3(1,1,2)] + _f2[LU3(1,1,5)] + _f2[LU3(1,1,6)]) - (_f2[LU3(1,1,4)] + _f2[LU3(1,1,7)] + 
_f2[LU3(1,1,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } rhowall = _rho[LU2(0,0)]; rhowall = rhof; #pragma omp for for (int k = 0; k < Q; k++) _f2[LU3(0,0,k)] = calcfeq(k,Ubot,Vlef,rhowall) + (_f2[LU3(1,1,k)] - calcfeq(k, u1f, u2f, rhof)); // Bottom Right (Nm1,0) rhof = _f2[LU3(Nm2,1,0)] + _f2[LU3(Nm2,1,1)] + _f2[LU3(Nm2,1,2)] + _f2[LU3(Nm2,1,3)] + _f2[LU3(Nm2,1,4)] + _f2[LU3(Nm2,1,5)]+ _f2[LU3(Nm2,1,6)]+ _f2[LU3(Nm2,1,7)]+ _f2[LU3(Nm2,1,8)]; u1f = ((_f2[LU3(Nm2,1,1)] + _f2[LU3(Nm2,1,5)] + _f2[LU3(Nm2,1,8)]) - (_f2[LU3(Nm2,1,3)] + _f2[LU3(Nm2,1,6)] + _f2[LU3(Nm2,1,7)])); u2f = ((_f2[LU3(Nm2,1,2)] + _f2[LU3(Nm2,1,5)] + _f2[LU3(Nm2,1,6)]) - (_f2[LU3(Nm2,1,4)] + _f2[LU3(Nm2,1,7)] + _f2[LU3(Nm2,1,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } rhowall = _rho[LU2(Nm1,0)]; rhowall = rhof; #pragma omp for for (int k = 0; k < Q; k++) _f2[LU3(Nm1,0,k)] = calcfeq(k,Ubot,Vrig,rhowall) + (_f2[LU3(Nm2,1,k)] - calcfeq(k, u1f, u2f, rhof)); // Top Right (Nm1,Nm1) rhof = _f2[LU3(Nm2,Mm2,0)] + _f2[LU3(Nm2,Mm2,1)] + _f2[LU3(Nm2,Mm2,2)] + _f2[LU3(Nm2,Mm2,3)] + _f2[LU3(Nm2,Mm2,4)] + _f2[LU3(Nm2,Mm2,5)]+ _f2[LU3(Nm2,Mm2,6)]+ _f2[LU3(Nm2,Mm2,7)]+ _f2[LU3(Nm2,Mm2,8)]; u1f = ((_f2[LU3(Nm2,Mm2,1)] + _f2[LU3(Nm2,Mm2,5)] + _f2[LU3(Nm2,Mm2,8)]) - (_f2[LU3(Nm2,Mm2,3)] + _f2[LU3(Nm2,Mm2,6)] + _f2[LU3(Nm2,Mm2,7)])); u2f = ((_f2[LU3(Nm2,Mm2,2)] + _f2[LU3(Nm2,Mm2,5)] + _f2[LU3(Nm2,Mm2,6)]) - (_f2[LU3(Nm2,Mm2,4)] + _f2[LU3(Nm2,Mm2,7)] + _f2[LU3(Nm2,Mm2,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } rhowall = _rho[LU2(Nm1,Mm1)]; rhowall = rhof; #pragma omp for for (int k = 0; k < Q; k++) _f2[LU3(Nm1,Mm1,k)] = calcfeq(k,Utop,Vrig,rhowall) + (_f2[LU3(Nm2,Mm2,k)] - calcfeq(k, u1f, u2f, rhof)); // Top Left (0,Nm1) rhof = _f2[LU3(1,Mm2,0)] + _f2[LU3(1,Mm2,1)] + _f2[LU3(1,Mm2,2)] + _f2[LU3(1,Mm2,3)] + _f2[LU3(1,Mm2,4)] + _f2[LU3(1,Mm2,5)]+ _f2[LU3(1,Mm2,6)]+ _f2[LU3(1,Mm2,7)]+ _f2[LU3(1,Mm2,8)]; u1f = ((_f2[LU3(1,Mm2,1)] + _f2[LU3(1,Mm2,5)] + _f2[LU3(1,Mm2,8)]) - (_f2[LU3(1,Mm2,3)] + 
_f2[LU3(1,Mm2,6)] + _f2[LU3(1,Mm2,7)])); u2f = ((_f2[LU3(1,Mm2,2)] + _f2[LU3(1,Mm2,5)] + _f2[LU3(1,Mm2,6)]) - (_f2[LU3(1,Mm2,4)] + _f2[LU3(1,Mm2,7)] + _f2[LU3(1,Mm2,8)])); if (INCOMP != 1){ u1f /= rhof; u2f /= rhof; } rhowall = _rho[LU2(0,Mm1)]; rhowall = rhof; #pragma omp for for (int k = 0; k < Q; k++) _f2[LU3(0,Mm1,k)] = calcfeq(k,Utop,Vlef,rhowall) + (_f2[LU3(1,Mm2,k)] - calcfeq(k, u1f, u2f, rhof)); }; break; } case 1: { // Poiseuille Flow #pragma omp parallel { double rhof{},u1f{},u2f{}; double rhowall{1.0}; #pragma omp for for (int i = 1; i < N-1; i++){ // Bottom wall, (i, 0) rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]); rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)]; u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])) / rhof; u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])) / rhof; for (int k = 0; k < Q; k++) _f2[LU3(i,0,k)] = calcfeq(k,0.0,0.0,rhowall) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof)); // Top Wall, (i,Nm1) rhowall = _f2[LU3(i,Mm1,0)] +_f2[LU3(i,Mm1,1)] +_f2[LU3(i,Mm1,3)] + 2.0 * (_f2[LU3(i,Mm1,2)] +_f2[LU3(i,Mm1,6)] +_f2[LU3(i,Mm1,5)]); rhof = _f2[LU3(i,Mm2,0)] + _f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,5)]+ _f2[LU3(i,Mm2,6)]+ _f2[LU3(i,Mm2,7)]+ _f2[LU3(i,Mm2,8)]; u1f = ((_f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,8)]) - (_f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,6)] + _f2[LU3(i,Mm2,7)])) / rhof; u2f = ((_f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,6)]) - (_f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,7)] + _f2[LU3(i,Mm2,8)])) / rhof; for (int k = 0; k < Q; k++) _f2[LU3(i,Mm1,k)] = calcfeq(k,0.0,0.0,rhowall) + (_f2[LU3(i,Mm2,k)] - calcfeq(k, u1f, u2f, rhof)); } } 
break; } case 0: { // Couette Flow #pragma omp parallel { double rhof{},u1f{},u2f{}; double rhowall{1.0}; #pragma omp for for (int i = 1; i < N-1; i++){ // Bottom wall, (i, 0) rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]); rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)]; u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])) / rhof; u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])) / rhof; for (int k = 0; k < Q; k++) _f2[LU3(i,0,k)] = calcfeq(k,Ubot,0.0,rhowall) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof)); // Top Wall, (i,Nm1) rhowall = _f2[LU3(i,Mm1,0)] +_f2[LU3(i,Mm1,1)] +_f2[LU3(i,Mm1,3)] + 2.0 * (_f2[LU3(i,Mm1,2)] +_f2[LU3(i,Mm1,6)] +_f2[LU3(i,Mm1,5)]); rhof = _f2[LU3(i,Mm2,0)] + _f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,5)]+ _f2[LU3(i,Mm2,6)]+ _f2[LU3(i,Mm2,7)]+ _f2[LU3(i,Mm2,8)]; u1f = ((_f2[LU3(i,Mm2,1)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,8)]) - (_f2[LU3(i,Mm2,3)] + _f2[LU3(i,Mm2,6)] + _f2[LU3(i,Mm2,7)])) / rhof; u2f = ((_f2[LU3(i,Mm2,2)] + _f2[LU3(i,Mm2,5)] + _f2[LU3(i,Mm2,6)]) - (_f2[LU3(i,Mm2,4)] + _f2[LU3(i,Mm2,7)] + _f2[LU3(i,Mm2,8)])) / rhof; for (int k = 0; k < Q; k++) _f2[LU3(i,Mm1,k)] = calcfeq(k,Utop,0.0,rhowall) + (_f2[LU3(i,Mm2,k)] - calcfeq(k, u1f, u2f, rhof)); } } break; } default: { std::printf("Error: Invalid Boundary condition case number\n"); exit(1); } } }; inline void HWBB(){ double rhowall = _rhobar; switch (BC){ case 2: { // Lid-driven cavity flow #pragma omp parallel for for (int i = 1; i < Mm1; i++){ // Left wall // rhowall = _f2[LU3(0,i,0)] +_f2[LU3(0,i,2)] +_f2[LU3(0,i,4)] + 2.0 * (_f2[LU3(0,i,3)] +_f2[LU3(0,i,6)] +_f2[LU3(0,i,7)]); _f2[LU3(0, i, 5)] = 
_fstar[LU3(0, i, 7)] - 2.0 * w[7] * rhowall * c[7][1] * Vlef * 3.0; _f2[LU3(0, i, 1)] = _fstar[LU3(0, i, 3)]; _f2[LU3(0, i, 8)] = _fstar[LU3(0, i, 6)] - 2.0 * w[6] * rhowall * c[6][1] * Vlef * 3.0; // Right wall // rhowall = _f2[LU3(Nm1,i,0)] +_f2[LU3(Nm1,i,2)] +_f2[LU3(Nm1,i,4)] + 2.0 * (_f2[LU3(Nm1,i,1)] +_f2[LU3(Nm1,i,5)] +_f2[LU3(Nm1,i,8)]); _f2[LU3(Nm1, i, 7)] = _fstar[LU3(Nm1, i, 5)] - 2.0 * w[5] * rhowall * c[5][1] * Vrig * 3.0; _f2[LU3(Nm1, i, 3)] = _fstar[LU3(Nm1, i, 1)]; _f2[LU3(Nm1, i, 6)] = _fstar[LU3(Nm1, i, 8)] - 2.0 * w[8] * rhowall * c[8][1] * Vrig * 3.0; } #pragma omp parallel for for (int i = 1; i < Nm1; i++){ // Bottom wall // rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]); _f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)] - 2.0 * w[8] * rhowall * c[8][0] * Ubot * 3.0; _f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)]; _f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)] - 2.0 * w[7] * rhowall * c[7][0] * Ubot * 3.0; // Top wall // rhowall = _f2[LU3(i,Mm1,0)] +_f2[LU3(i,Mm1,1)] +_f2[LU3(i,Mm1,3)] + 2.0 * (_f2[LU3(i,Mm1,2)] +_f2[LU3(i,Mm1,6)] +_f2[LU3(i,Mm1,5)]); _f2[LU3(i, Mm1, 8)] = _fstar[LU3(i, Mm1, 6)] - 2.0 * w[6] * rhowall * c[6][0] * Utop * 3.0; _f2[LU3(i, Mm1, 4)] = _fstar[LU3(i, Mm1, 2)]; _f2[LU3(i, Mm1, 7)] = _fstar[LU3(i, Mm1, 5)] - 2.0 * w[5] * rhowall * c[5][0] * Utop * 3.0; } // Corners // Top left // rhowall = _rho[LU2(0, Mm1)]; _f2[LU3(0, Mm1, 1)] = _fstar[LU3(0, Mm1, 3)]; _f2[LU3(0, Mm1, 8)] = _fstar[LU3(0, Mm1, 6)] - 2.0 * w[6] * rhowall * (c[6][0] * Utop + c[6][1] * Vlef) * 3.0; _f2[LU3(0, Mm1, 4)] = _fstar[LU3(0, Mm1, 2)]; // Bottom Left // rhowall = _rho[LU2(0, 0)]; _f2[LU3(0, 0, 1)] = _fstar[LU3(0, 0, 3)]; _f2[LU3(0, 0, 5)] = _fstar[LU3(0, 0, 7)] - 2.0 * w[7] * rhowall * (c[7][0] * Ubot + c[7][1] * Vlef) * 3.0; _f2[LU3(0, 0, 2)] = _fstar[LU3(0, 0, 4)]; // Top Right // rhowall = _rho[LU2(Nm1, Mm1)]; _f2[LU3(Nm1, Mm1, 3)] = _fstar[LU3(Nm1, Mm1, 1)]; _f2[LU3(Nm1, Mm1, 7)] = 
_fstar[LU3(Nm1, Mm1, 5)] - 2.0 * w[5] * rhowall * (c[5][0] * Utop + c[5][1] * Vrig) * 3.0; _f2[LU3(Nm1, Mm1, 4)] = _fstar[LU3(Nm1, Mm1, 2)]; // Bottom Right // rhowall = _rho[LU2(Nm1, 0)]; _f2[LU3(Nm1, 0, 3)] = _fstar[LU3(Nm1, 0, 1)]; _f2[LU3(Nm1, 0, 6)] = _fstar[LU3(Nm1, 0, 8)] - 2.0 * w[8] * rhowall * (c[8][0] * Ubot + c[8][1] * Vrig) * 3.0; _f2[LU3(Nm1, 0, 2)] = _fstar[LU3(Nm1, 0, 4)]; break; } case 1: { // Poiseuille Flow #pragma omp parallel for for (int i = 0; i < N; i++){ // Bottom wall _f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)]; _f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)]; _f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)]; // Top wall _f2[LU3(i, Mm1, 8)] = _fstar[LU3(i, Mm1, 6)]; _f2[LU3(i, Mm1, 4)] = _fstar[LU3(i, Mm1, 2)]; _f2[LU3(i, Mm1, 7)] = _fstar[LU3(i, Mm1, 5)]; } break; } case 0: { // Couette flow #pragma omp parallel for for (int i = 0; i < N; i++){ // Bottom wall _f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)] - 2.0 * w[8] * _rhobar * c[8][0] * Ubot * 3.0; _f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)] - 2.0 * w[4] * _rhobar * c[4][0] * Ubot * 3.0; _f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)] - 2.0 * w[7] * _rhobar * c[7][0] * Ubot * 3.0; // Top wall _f2[LU3(i, Mm1, 8)] = _fstar[LU3(i, Mm1, 6)] - 2.0 * w[6] * _rhobar * c[6][0] * Utop * 3.0; _f2[LU3(i, Mm1, 4)] = _fstar[LU3(i, Mm1, 2)] - 2.0 * w[2] * _rhobar * c[2][0] * Utop * 3.0; _f2[LU3(i, Mm1, 7)] = _fstar[LU3(i, Mm1, 5)] - 2.0 * w[5] * _rhobar * c[5][0] * Utop * 3.0; } break; } default: { std::printf("Error: Invalid Boundary condition case number\n"); exit(1); } } } inline void swap(){ _f1.swap(_f2); } inline bool convergence(const unsigned int t){ if (t == 0) _df0 = 1.0 / rmsError(); if (t % 1000 == 0) { _df = rmsError() * _df0; if (t / 1000 == _error.size()) _error.resize(2 * t / 1000); _error[t / 1000] = _df; } if (t % prntInt == 0) { cout << "\nIteration " << t << ":" << endl; printf("df/df0:\t%.3e\n", _df); stop = omp_get_wtime(); printf("Time:\t%.3e s\n", stop-start); printf("rho:\t%.3e\n", _rhobar); start = 
omp_get_wtime(); } return (_df < THRESH); } inline void macroVars(){ double temp{},fTotalx{},fTotaly{}; int ind1{}; _rhobar = 0.0; #pragma omp parallel for private(temp,ind1,fTotalx,fTotaly) collapse(2) reduction(+:_rhobar) for (int i = 0; i < N; i++){ for (int j = 0; j < M; j++){ ind1 = LU2(i,j); fTotalx = _forceX[ind1] + _fBodyX; fTotaly = _forceY[ind1] + _fBodyY; temp = _f2[LU3(i,j,0)] + _f2[LU3(i,j,1)] + _f2[LU3(i,j,2)] + _f2[LU3(i,j,3)] + _f2[LU3(i,j,4)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,6)] + _f2[LU3(i,j,7)] + _f2[LU3(i,j,8)]; _rho[ind1] = temp; _rhobar += (temp / N2); _u1[ind1] = ((_f2[LU3(i,j,1)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,8)]) - (_f2[LU3(i,j,3)] + _f2[LU3(i,j,6)] + _f2[LU3(i,j,7)])); _u2[ind1] = ((_f2[LU3(i,j,2)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,6)]) - (_f2[LU3(i,j,4)] + _f2[LU3(i,j,7)] + _f2[LU3(i,j,8)])); if (INCOMP != 1){ _u1[ind1] /= temp; _u2[ind1] /= temp; _p[ind1] = _rho[ind1] / 3.0; } else { _p[ind1] = (1.0/(3.0*(1.0 - w[0]))) * (calcfeq(0,ind1)+calcfeq(1,ind1)+calcfeq(2,ind1)+calcfeq(3,ind1)+calcfeq(4,ind1) +calcfeq(5,ind1)+calcfeq(6,ind1)+calcfeq(7,ind1)+calcfeq(8,ind1) - 1.5 * (P2(_u1[ind1]) + P2(_u2[ind1]))); } // Forces _u1[ind1] += 0.5 * fTotalx / temp; _u2[ind1] += 0.5 * fTotaly / temp; } } } inline void output(){ calcvmag(); calcstress(); calcVort(); FILE *f = fopen(_filename1,"w"); int ind1; if (f == nullptr) { printf("Error opening file!\n"); exit(1); } fprintf(f, "TITLE=\"%s\" VARIABLES=\"x\", \"y\", \"u\", \"v\", \"vmag\", \"omegaxy\", \"vortz\" ZONE T=\"%s\" I=%d J=%d F=POINT\n", _filename1, _filename1,N,M); for (int j = 0; j < M; j++) { for (int i = 0; i < N; i++) { ind1 = LU2(i,j); fprintf(f, "%.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f\n", _x[i], _y[j], _u1[ind1] / _Umax,_u2[ind1] / _Umax, _vmag[ind1], _stress[ind1], _vort[ind1]); } } fclose(f); FILE *f2 = fopen(_filename2,"w"); if (f2 == nullptr) { printf("Error opening file!\n"); exit(1); } for (unsigned int i = 0; i < _error.size(); i++) { if (_error[i] == 0.0) break; else 
fprintf(f2, "%d\t%.10e\n", 1000*i,_error[i]); } fclose(f2); } inline void immersedBoundary(){ // Calculate vector B Eigen::VectorXd matrixBx = Eigen::VectorXd::Zero(IBN),matrixBy = Eigen::VectorXd::Zero(IBN); double sumx{},sumy{},Dirac; for (int i = 0; i < IBN; i++){ sumx = 0.0; sumy = 0.0; #pragma omp parallel for collapse(2) private(Dirac) reduction(+:sumx,sumy) for (int k = (int)_IBrxmn; k <= (int)_IBrxmx; k++) { for (int l =(int) _IBrymn; l <= (int)_IBrymx; l++) { Dirac = diracdelta(_IBrx[i] - k) * diracdelta(_IBry[i] - l); sumx += _u1[LU2(k,l)] * Dirac; sumy += _u2[LU2(k,l)] * Dirac; } } matrixBx(i) = _IBub[i] - sumx; matrixBy(i) = _IBvb[i] - sumy; } // Solve for velocity correction Eigen::VectorXd IBdu = Eigen::VectorXd::Zero(IBN),IBdv = Eigen::VectorXd::Zero(IBN); IBdu = _IBmatrixAxInv * matrixBx; IBdv = _IBmatrixAyInv * matrixBy; // Spread du to LBM grid double du,dv,ind1; #pragma omp parallel for collapse(2) private(du,dv,ind1,Dirac) for (int i = (int)_IBrxmn; i <= (int)_IBrxmx; i++){ for (int j = (int)_IBrymn; j <= (int)_IBrymx; j++){ du = 0.0; dv = 0.0; ind1 = LU2(i,j);_forceX[ind1] = 0.0;_forceY[ind1] = 0.0; for (int k = 0; k < IBN; k++){ Dirac = diracdelta(_IBrx[k] - i) * diracdelta(_IBry[k] - j); du += IBdu[k] * Dirac; dv += IBdv[k] * Dirac; } _u1[ind1] += du; _u2[ind1] += dv; _forceX[ind1] += 2.0 * du * _rho[ind1]; _forceY[ind1] += 2.0 * dv * _rho[ind1]; } } } private: vector<double> _f1,_f2,_fstar,_u1,_u2,_rho,_p,_x,_y,_error,_vmag,_stress,_vort,_dudx,_dudy,_dvdx,_dvdy,_forceX,_forceY; double _df, _df0, _MACHSTAR, _TAU_P, _OMEGA, _OMEGAm, _CS, _MACH, _rhobar; char _filename1[80]; char _filename2[80]; double _omega_e,_omega_eps,_omega_q,_omega_nu, _GS[9]; double _Umax; double _pi = 3.1415926535897; vector<double> _IBrx, _IBry, _IBur, _IBvr, _IBFx, _IBFy, _IBub, _IBvb; Eigen::MatrixXd _IBmatrixAx, _IBmatrixAy, _IBmatrixAxInv, _IBmatrixAyInv; double _fBodyX,_fBodyY,_IBds,_IBrxmx,_IBrxmn,_IBrymx,_IBrymn; // Left-right periodicity inline void 
virtualnode(){ #pragma omp parallel for for (int j = 1; j < Mm1; j++) { _fstar[LU3(0, j, 1)] = _fstar[LU3(Nm2, j, 1)]; _fstar[LU3(0, j, 5)] = _fstar[LU3(Nm2, j, 5)]; _fstar[LU3(0, j, 8)] = _fstar[LU3(Nm2, j, 8)]; _fstar[LU3(Nm1, j, 3)] = _fstar[LU3(1, j, 3)]; _fstar[LU3(Nm1, j, 6)] = _fstar[LU3(1, j, 6)]; _fstar[LU3(Nm1, j, 7)] = _fstar[LU3(1, j, 7)]; } // Top Left _fstar[LU3(0, Mm1, 1)] = _fstar[LU3(Nm2, Mm1, 1)]; _fstar[LU3(0, Mm1, 4)] = _fstar[LU3(Nm2, Mm1, 4)]; _fstar[LU3(0, Mm1, 8)] = _fstar[LU3(Nm2, Mm1, 8)]; // Top Right _fstar[LU3(Nm1, Mm1, 3)] = _fstar[LU3(1, Mm1, 3)]; _fstar[LU3(Nm1, Mm1, 7)] = _fstar[LU3(1, Mm1, 7)]; _fstar[LU3(Nm1, Mm1, 4)] = _fstar[LU3(1, Mm1, 4)]; // Bottom Left _fstar[LU3(0, 0, 1)] = _fstar[LU3(Nm2, 0, 1)]; _fstar[LU3(0, 0, 2)] = _fstar[LU3(Nm2, 0, 2)]; _fstar[LU3(0, 0, 5)] = _fstar[LU3(Nm2, 0, 5)]; // Bottom Right _fstar[LU3(Nm1, 0, 3)] = _fstar[LU3(1, 0, 3)]; _fstar[LU3(Nm1, 0, 2)] = _fstar[LU3(1, 0, 2)]; _fstar[LU3(Nm1, 0, 6)] = _fstar[LU3(1, 0, 6)]; } // Intitializes x vector inline void linspace(vector<double> &x, const double _start, const double _end, const int _num){ for (int i = 0; i < _num; i++) x[i] = _start + i * (_end - _start) / (_num - 1.0); } inline double calcfeq(const int k, const int ind, const int check = 1){ const double u1ij=_u1[ind], u2ij = _u2[ind]; double cdotu{},feq{},u2{},rho0=_rho[ind]; if (INCOMP==1){ u2 = P2(u1ij) + P2(u2ij); const double s0 = w[0] * (-1.5 * u2); const double p = (1.0 / (3.0*(1.0 - w[0]))) * (_rho[ind] + s0); cdotu = c[k][0] * u1ij + c[k][1] * u2ij; const double s = w[k]*(3.0 * cdotu + 4.5 * P2(cdotu) - 1.5 * u2); if (k == 0) { feq = _rho[ind] - (1.0 - w[0]) * 3.0 * p + s0; } else { feq = w[k] * 3.0 * p + s; } } else{ u2 = P2(u1ij) + P2(u2ij); cdotu = c[k][0] * u1ij + c[k][1] * u2ij; feq = w[k] * (_rho[ind] + rho0 * (3.0 * cdotu + (4.5 * P2(cdotu) - 1.5 * u2) / GAMMA)); if (check == 1) checkfeq(feq,ind); } return feq; } inline double calcfeq(const int k, const double u1ij, const double 
u2ij, const double rhoij, const int check = 1){ double cdotu{},feq{},u2{}, rho0=rhoij; if (INCOMP==1){ u2 = P2(u1ij) + P2(u2ij); const double s0 = w[0] * (-1.5 * u2); const double p = (1.0 / (3.0*(1.0 - w[0]))) * (rhoij + s0); cdotu = c[k][0] * u1ij + c[k][1] * u2ij; const double s = w[k]*(3.0 * cdotu + 4.5 * P2(cdotu) - 1.5 * u2); if (k == 0) { feq = rhoij - (1.0 - w[0]) * 3.0 * p + s0; } else { feq = w[k] * 3.0 * p + s; } } else{ u2 = P2(u1ij) + P2(u2ij); cdotu = c[k][0] * u1ij + c[k][1] * u2ij; feq = w[k] * (rhoij + rho0 * (3.0 * cdotu + (4.5 * P2(cdotu) - 1.5 * u2) / GAMMA)); if (check == 1) checkfeq(feq,1); } return feq; } // Checks for negative feq inline void checkfeq(const double value, const int index){ if (value < 0) { printf("Error: negative feq at index %d. Therefore, unstable.\n",index); exit(1); } } inline double rmsError(){ double difference{}; #pragma omp parallel for reduction(+:difference) for (int i = 0; i < NQ; i++) difference += P2(_f2[i] - _f1[i]); return sqrt(difference / NQ); } inline void calcmeq(vector<double> &meq, const double u1, const double u2, const double rho, const double pres) { const double u12 = P2(u1); const double u22 = P2(u2); if (INCOMP==1) { const double alpha2 = 24.0, alpha3 = -36.0; const double c1 = -2.0, c2 = -2.0; const double gamma1 = 2.0/3.0,gamma2 = 18.0,gamma3=2.0/3.0,gamma4=-18.0; meq[0] = rho; // rho meq[1] = 0.25*alpha2*pres + (gamma2/6.0)*(u12+u22); // e meq[2] = 0.25*alpha3*pres + (gamma4/6.0)*(u12+u22); // eps meq[3] = u1; // jx meq[4] = 0.5*c1*u1; // qx meq[5] = u2; // jy meq[6] = 0.5*c2*u2; // qy meq[7] = 1.5*gamma1*(u12-u22); // pxx meq[8] = 1.5*gamma3*u1*u2; // pxy } else { meq[0] = rho; // rho meq[1] = -2 * rho + 3 * rho * (u12 + u22) / GAMMA; // e meq[2] = rho - 3 * rho * (u12 + u22) / GAMMA; // eps meq[3] = rho * u1; // jx meq[4] = -meq[3]; // qx meq[5] = rho * u2; // jy meq[6] = -meq[5]; // qy meq[7] = rho * (u12 - u22) / GAMMA; // pxx meq[8] = rho * u1 * u2 / GAMMA; // pxy } } inline int LU2(const 
int i, const int j){ return N*j + i; } inline int LU3(const int i, const int j, const int k){ return N2*k + N*j + i; } inline double P2(const double value){ return (value * value); } inline void calcvmag(){ #pragma omp parallel for for (int i = 0; i < N2; i++) _vmag[i] = sqrt(P2(_u1[i]) + P2(_u2[i])); } inline void calcstress(){ #pragma omp parallel { int ind1; #pragma omp for collapse(2) for (int i = 0; i < N; i++){ for (int j = 0; j < M; j++){ ind1 = LU2(i,j); for (int k = 0; k < Q; k++){ _stress[ind1] -= (1.0 - 0.5 * _OMEGA) * c[k][0] * c[k][1] * (_f2[LU3(i,j,k)] - calcfeq(k,ind1)); } if (BC == 1) _stress[ind1] -= 0.5 * (1.0 - 0.5 * _OMEGA) * (_forceX[ind1] * _u2[ind1] + _forceY[ind1] * _u1[ind1]); } } } } inline void calcVort(){ calcDerivatives(); #pragma omp parallel for for (int i = 0; i < N2; i++) _vort[i] = _dvdx[i] - _dudy[i]; } inline void calcDerivatives(){ // 2rd order Finite Difference int indS{},indE{},ind{},indN{},indW{}; double hinv = 1.0 / ((_x[1] - _x[0]) * _Umax); // scales derivatives in space and velocity scale // // d/dx central differences for (int i = 1; i < Nm1; i++){ for (int j = 1; j < M; j++){ ind = LU2(i,j); // Current point indE = LU2(i+1,j); // West indW = LU2(i-1,j); // East _dudx[ind] = 0.5 * (_u1[indE] - _u1[indW]) * hinv; _dvdx[ind] = 0.5 * (_u2[indE] - _u2[indW]) * hinv; } } // d/dy central differences for (int i = 0; i < N; i++){ for (int j = 1; j < Mm1; j++){ ind = LU2(i,j); // Current point indN = LU2(i,j+1); // North indS = LU2(i,j-1); // South _dudy[ind] = 0.5 * (_u1[indN] - _u1[indS]) * hinv; _dvdy[ind] = 0.5 * (_u2[indN] - _u2[indS]) * hinv; } } int indSS{},indNN{},indEE{},indWW{}; // d/dx forward (i = 0) & backward (i = Nm1) differences for (int j = 0; j < M; j++){ // forward ind = LU2(0,j); indE = LU2(1,j); indEE = LU2(2,j); _dudx[ind] = (-1.5 * _u1[ind] + 2.0 * _u1[indE] - 0.5 * _u1[indEE]) * hinv; _dvdx[ind] = (-1.5 * _u2[ind] + 2.0 * _u2[indE] - 0.5 * _u2[indEE]) * hinv; // backward ind = LU2(Nm1,j); indW = 
LU2(Nm2,j); indWW = LU2(N - 3,j); _dudx[ind] = (1.5 * _u1[ind] - 2.0 * _u1[indW] + 0.5 * _u1[indWW]) * hinv; _dvdx[ind] = (1.5 * _u2[ind] - 2.0 * _u2[indW] + 0.5 * _u2[indWW]) * hinv; } // d/dy forward (j = 0) & backward (j = Mm1) differences for (int i = 0; i < N; i++){ // forward ind = LU2(i,0); indN = LU2(i,1); indNN = LU2(i,2); _dudy[ind] = (-1.5 * _u1[ind] + 2.0 * _u1[indN] - 0.5 * _u1[indNN]) * hinv; _dvdy[ind] = (-1.5 * _u2[ind] + 2.0 * _u2[indN] - 0.5 * _u2[indNN]) * hinv; // backward ind = LU2(i,Mm1); indS = LU2(i,Mm2); indSS = LU2(i,M - 3); _dudy[ind] = (1.5 * _u1[ind] - 2.0 * _u1[indS] + 0.5 * _u1[indSS]) * hinv; _dvdy[ind] = (1.5 * _u2[ind] - 2.0 * _u2[indS] + 0.5 * _u2[indSS]) * hinv; } } inline double diracdelta(const double x, const int order = 4){ const double absx = abs(x); double phi{}; const double dx = 1.0; switch (order){ case 2 : { if (absx < dx) phi = 1.0 - absx; else phi = 0.0; break; } case 3 : { if (absx <= 0.5 * dx) phi = (1.0/3.0) * (1.0 + sqrt(1.0 - 3.0 * P2(absx))); else if (absx <= 1.5 * dx) phi = (1.0/6.0) * (5.0 - 3.0 * absx - sqrt(-2.0 + 6.0 * absx - 3.0 * P2(absx))); else phi = 0.0; break; } case 4 : { if (absx <= dx) phi = (1.0/8.0) * (3.0 - 2.0 * absx + sqrt(1.0 + 4.0 * absx - 4.0 * P2(absx))); else if (absx <= (2.0 * dx)) phi = (1.0/8.0) * (5.0 - 2.0 * absx - sqrt(-7.0 + 12.0 * absx - 4.0 * P2(absx))); else phi = 0.0; break; } default : { cout << "Invalid order for delta function. Must be 2,3, or 4" << endl; exit(1); } } return phi; } bool diagdominant(const vector<vector<double>> &matrix, const int size){ double diag{},sum{}; bool dd = true; for (int i = 0; i < size; i++){ diag = matrix[i][i]; sum = 0.0; for (int j = 0; j < size; j++){ if (j != i) sum += matrix[i][j]; } if (sum > diag){ dd = false; break; } } return dd; } }; #endif //LIDDRIVENCAVITYLBM_LBMCLASS_H
symv_x_coo_n_lo.c
#include "alphasparse/kernel.h"
#include "alphasparse/kernel_plain.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <stdlib.h> /* calloc/free were used without this; project headers hid it */
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := beta*y + alpha*A*x for a symmetric COO matrix whose LOWER triangle
 * is stored (entries with row < col are ignored).
 *
 * Strategy: each thread scatters into a private length-m accumulator so the
 * mirrored update of off-diagonal entries (both (r,c) and (c,r)) is
 * race-free; the per-thread buffers are then reduced into y.
 *
 * Returns ALPHA_SPARSE_STATUS_SUCCESS, or ALPHA_SPARSE_STATUS_ALLOC_FAILED
 * when a temporary buffer cannot be allocated (the original code did not
 * check the allocations at all).
 */
static alphasparse_status_t symv_coo_n_lo_omp(const ALPHA_Number alpha,
                                              const ALPHA_SPMAT_COO *A,
                                              const ALPHA_Number *x,
                                              const ALPHA_Number beta,
                                              ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT nnz = A->nnz;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* calloc zero-fills, replacing the malloc+memset pair, and checks the
       count*size product for overflow. */
    ALPHA_Number **tmp = calloc(thread_num, sizeof(ALPHA_Number *));
    if (tmp == NULL)
    {
        return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
    }
    /* Allocate serially: thread_num tiny allocations gain nothing from a
       parallel loop, and serial code lets us unwind cleanly on failure. */
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        tmp[i] = calloc(m, sizeof(ALPHA_Number));
        if (tmp[i] == NULL)
        {
            for (ALPHA_INT j = 0; j < i; ++j)
                alpha_free(tmp[j]);
            /* NOTE(review): the original freed malloc'd memory with
               alpha_free, so alpha_free is assumed to wrap free — confirm. */
            alpha_free(tmp);
            return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
        }
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < nnz; i++)
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT r = A->row_indx[i];
        const ALPHA_INT c = A->col_indx[i];
        if (r < c)
        {
            /* Upper-triangle entries are not part of the stored data. */
            continue;
        }
        ALPHA_Number v;
        alpha_mul(v, alpha, A->values[i]);
        if (r == c)
        {
            alpha_madde(tmp[tid][r], v, x[c]);
        }
        else
        {
            /* Off-diagonal entry contributes to row r and, by symmetry,
               to the mirrored row c. */
            alpha_madde(tmp[tid][r], v, x[c]);
            alpha_madde(tmp[tid][c], v, x[r]);
        }
    }

    /* Reduce the per-thread accumulators into y (parallel over rows). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }

    for (ALPHA_INT i = 0; i < thread_num; ++i)
        alpha_free(tmp[i]);
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: dispatches to the OpenMP kernel.  (The unused
   thread_num local of the original has been removed.) */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_COO *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return symv_coo_n_lo_omp(alpha, A, x, beta, y);
}
asm-1.c
/* PR middle-end/30263 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */

/* Compile-only regression test: inline asm with memory ("m") and
   memory-or-register ("mr") operand constraints applied to variables of
   every OpenMP data-sharing class (shared, private, firstprivate) inside
   a parallel region.  The code must stay exactly as written — the operand
   shapes are what the PR exercised. */
void foo (void)
{
  int s0, s1 = 5, s2 = 6;   /* shared */
  int p0, p1, p2;           /* private */
  int f0 = 4, f1 = 5, f2 = 6; /* firstprivate */
#pragma omp parallel shared (s0, s1, s2) private (p0, p1, p2) \
                     firstprivate (f0, f1, f2)
  {
    asm ("" : "=m" (p0) : "m" (p1), "mr" (p2));
    /* NOTE(review): omp_get_thread_num is called without a prototype;
       the testcase appears to rely on an implicit declaration — confirm
       against the dg test harness expectations. */
    if (omp_get_thread_num () == 0)
      asm ("" : "=m" (s0) : "m" (s1), "mr" (s2));
    asm ("" : "=m" (f0) : "m" (f1), "mr" (f2));
  }
}
ompsievetest.c
// Copyright 2009-2021 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2021, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.

#include <stdio.h>
#include <stdlib.h>

#include <vector>

/*
 * Test for sieve
 * Contains:
 *  * direct calls to malloc
 *  * indirect calls (via std library containers)
 *  * calls from each thread
 *  * accesses to mallocs by different threads
 */

int
main(int argc, char* argv[])
{
    const int n = 20;
    // Row-pointer table plus one heap row per entry: gives the tool both a
    // direct top-level malloc and per-thread mallocs inside the parallel loop.
    int** the_array = (int**)malloc(sizeof(int*) * n);

    int i = 0;
#pragma omp parallel for
    for ( i = 0; i < n; ++i ) {
        the_array[i] = (int*)malloc(sizeof(int) * n);
        int j = 0;
        for ( j = 0; j < n; ++j ) {
            the_array[i][j] = 0; // initialize
        }
    }

    // Second parallel pass touches rows allocated by (possibly) other threads.
#pragma omp parallel for
    for ( i = 0; i < n; ++i ) {
        int j = 0;
        for ( j = 0; j < n; ++j ) {
            if ( j < i ) { the_array[i][j] = 1; }
            else {
                the_array[i][j] = 0;
            }
        }
    }

    // Now have a triangle matrix, so do something with std lib (indirect
    // allocations through the vector).
    std::vector<int> rowSums;
    for ( int i = 0; i < n; i++ ) {
        rowSums.push_back(0);
        for ( int j = 0; j < n; j++ ) {
            rowSums[i] += the_array[i][j];
        }
    }

    printf("The vector is:\n");
    for ( std::vector<int>::iterator it = rowSums.begin(); it != rowSums.end(); it++ ) {
        printf("%d\n", *it);
    }

    // Fix: release the heap blocks (the original leaked every allocation)
    // and return an explicit status from main().  All tool-visible accesses
    // have already happened, so the test's purpose is unchanged.
    for ( int i = 0; i < n; i++ ) {
        free(the_array[i]);
    }
    free(the_array);
    return 0;
}
implementation.h
#ifndef MIGRATION_ON_C_IMPLEMENTATION_H
#define MIGRATION_ON_C_IMPLEMENTATION_H

#include <vector>
#include <iostream>
#include <omp.h>
#include <cstddef>

/*
 * For every output node, sum the seismogram amplitudes picked at the travel
 * times listed for that node.
 *
 * ptr  - seismogram samples, addressed as
 *        ptr[trace * strides_seismogram_info_y + sample * strides_seismogram_info_x]
 * ptr1 - travel times per (node, trace), addressed as
 *        ptr1[node * strides_timeneiron_info_y + trace * strides_timeneiron_info_x]
 * ptr2 - output: one summed amplitude per node
 * dt   - sample interval used to convert a time into a sample index
 *
 * A pick whose index falls past the trace length (>= n_samples) is simply
 * skipped.  Nodes are independent, so the outer loop is parallelized.
 */
template <typename T, typename T1>
void calculate_sumAmp_node(T *ptr, T1 *ptr1, double *ptr2, std::ptrdiff_t n_traces,
                           std::ptrdiff_t n_samples, std::ptrdiff_t n_node, float dt,
                           std::ptrdiff_t strides_seismogram_info_y,
                           std::ptrdiff_t strides_seismogram_info_x,
                           std::ptrdiff_t strides_timeneiron_info_y,
                           std::ptrdiff_t strides_timeneiron_info_x) {
#pragma omp parallel for
    for (int node = 0; node < n_node; node++) {
        double amp_sum = 0.0;
        for (int trace = 0; trace < n_traces; trace++) {
            // Convert the picked time into a (truncated) sample index.
            const int sample =
                int(ptr1[strides_timeneiron_info_y * node + strides_timeneiron_info_x * trace] / dt);
            if (sample < n_samples) {
                amp_sum += ptr[strides_seismogram_info_y * trace + strides_seismogram_info_x * sample];
            }
        }
        ptr2[node] = amp_sum;
    }
}

#endif //MIGRATION_ON_C_IMPLEMENTATION_H
DRACC_OMP_026_MxV_Missing_Exit_Data_yes.c
/* Matrix Vector multiplication without copying back the result c, while utilising the enter data construct. */
/* NOTE(review): this is a DRACC defect benchmark ("..._yes" = issue present on
   purpose).  The seeded bug is in Mult(): the result vector c is released from
   the device with map(release:...) instead of being copied back, so the host
   copy of c is never updated.  Do NOT "fix" this file. */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

#define C 512

/* Heap-allocated in main(): a = vector of ones, b = C x C matrix of ones
   (row-major), c = result vector. */
int *a;
int *b;
int *c;

/* Fill b with 1s, a with 1s, c with 0s; always returns 0. */
int init(){
    for(int i=0; i<C; i++){
        for(int j=0; j<C; j++){
            b[j+i*C]=1;
        }
        a[i]=1;
        c[i]=0;
    }
    return 0;
}

/* Compute c = b*a on device 0.  The enter-data construct maps a, b and c to
   the device; the exit-data construct only RELEASES the buffers (no "from"
   map), so the computed c never reaches the host — the intentional defect. */
int Mult(){
    #pragma omp target enter data map(to:a[0:C],b[0:C*C],c[0:C]) device(0)
    #pragma omp target device(0)
    {
        #pragma omp teams distribute parallel for
        for(int i=0; i<C; i++){
            for(int j=0; j<C; j++){
                c[i]+=b[j+i*C]*a[j];
            }
        }
    }
    #pragma omp target exit data map(release:c[0:C]) map(release:a[0:C],b[0:C*C]) device(0)
    return 0;
}

/* Every c[i] should equal C (row sum of ones); prints whether the stale-host
   issue is visible.  Always returns 0. */
int check(){
    bool test = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            test = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
    return 0;
}

/* Allocate, run the pipeline, report, free. */
int main(){
    a = malloc(C*sizeof(int));
    b = malloc(C*C*sizeof(int));
    c = malloc(C*sizeof(int));
    init();
    Mult();
    check();
    free(a);
    free(b);
    free(c);
    return 0;
}
vecAdd_deadlock.c
/****************************************************************************** * FILE: omp_bug5.c * DESCRIPTION: * Using SECTIONS, two threads initialize their own array and then add * it to the other's array, however a deadlock occurs. * AUTHOR: Blaise Barney 01/29/04 * LAST REVISED: 04/06/05 ******************************************************************************/ /** * The first thread acquires locka and then tries to get lockb before releasing * locka. Meanwhile, the second thread has acquired lockb and then tries to get * locka before releasing lockb. * Online source: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_bug5.c **/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #define N 10 //Originally 1000000 #define PI 3.1415926535 #define DELTA .01415926535 int main (int argc, char *argv[]) { int nthreads, tid, i; float a[N], b[N]; omp_lock_t locka, lockb; /* Initialize the locks */ omp_init_lock(&locka); omp_init_lock(&lockb); /* Fork a team of threads giving them their own copies of variables */ #pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid) { /* Obtain thread number and number of threads */ tid = omp_get_thread_num(); #pragma omp master { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } printf("Thread %d starting...\n", tid); #pragma omp barrier #pragma omp sections nowait { #pragma omp section { printf("Thread %d initializing a[]\n",tid); omp_set_lock(&locka); for (i=0; i<N; i++) a[i] = i * DELTA; omp_set_lock(&lockb); printf("Thread %d adding a[] to b[]\n",tid); for (i=0; i<N; i++) b[i] += a[i]; omp_unset_lock(&lockb); omp_unset_lock(&locka); } #pragma omp section { printf("Thread %d initializing b[]\n",tid); omp_set_lock(&lockb); for (i=0; i<N; i++) b[i] = i * PI; omp_set_lock(&locka); printf("Thread %d adding b[] to a[]\n",tid); for (i=0; i<N; i++) a[i] += b[i]; omp_unset_lock(&locka); omp_unset_lock(&lockb); } } /* end of sections */ } /* end of parallel region */ }
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_

#include "../VectorIndex.h"

#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"

#include <chrono>

#if defined(GPU)
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <cuda_fp16.h>

#include "inc/Core/Common/cuda/KNN.hxx"
#include "inc/Core/Common/cuda/params.h"
#endif

namespace SPTAG
{
    namespace COMMON
    {
        // Abstract base for the k-NN/RNG neighborhood graph used by SPTAG
        // indices.  Owns the adjacency table (m_pNeighborhoodGraph), builds an
        // initial approximate KNN graph (TP-tree partitioning on CPU, or a CUDA
        // builder when compiled with GPU), then refines it; subclasses supply
        // the neighbor insertion/rebuild policies.
        class NeighborhoodGraph
        {
        public:
            NeighborhoodGraph(): m_iTPTNumber(32),
                m_iTPTLeafSize(2000),
                m_iSamples(1000),
                m_numTopDimensionTPTSplit(5),
                m_iNeighborhoodSize(32),
                m_fNeighborhoodScale(2.0),
                m_fCEFScale(2.0),
                m_fRNGFactor(1.0),
                m_iRefineIter(2),
                m_iCEF(1000),
                m_iAddCEF(500),
                m_iMaxCheckForRefineGraph(10000),
                m_iGPUGraphType(2),
                m_iGPURefineSteps(0),
                m_iGPURefineDepth(2),
                m_iGPULeafSize(500)
            {}

            ~NeighborhoodGraph() {}

            // Insert one neighbor candidate into an existing node's list
            // (policy defined by the concrete graph type).
            virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;

            // Rebuild a node's neighbor list from a sorted candidate set.
            virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;

            // Estimate graph quality: for `samples` random nodes, compute the
            // exact neighbor list by brute force and count how many entries the
            // stored graph shares with it.  Returns the matching fraction.
            virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                DimensionType* correct = new DimensionType[samples];

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < samples; i++)
                {
                    SizeType x = COMMON::Utils::rand(m_iGraphSize);
                    //int x = i;
                    COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
                    // Brute-force scan of all vectors (skipping remapped ids).
                    for (SizeType y = 0; y < m_iGraphSize; y++)
                    {
                        if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
                        float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
                        query.AddPoint(y, dist);
                    }
                    query.SortResult();

                    SizeType * exact_rng = new SizeType[m_iNeighborhoodSize];
                    RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);

                    correct[i] = 0;
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                        if (exact_rng[j] == -1) {
                            // -1 terminates the exact list; remaining slots
                            // count as trivially matched.
                            correct[i] += m_iNeighborhoodSize - j;
                            break;
                        }
                        for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                            if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
                                correct[i]++;
                                break;
                            }
                    }
                    delete[] exact_rng;
                }
                float acc = 0;
                for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
                acc = acc / samples / m_iNeighborhoodSize;
                delete[] correct;
                return acc;
            }

#if defined(GPU)
            // GPU path: delegate the whole KNN-build + RNG-refine to the CUDA
            // builder, then translate remapped ids if an idmap is supplied.
            template <typename T>
            void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
            {
                SizeType initSize;
                SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize);

                // Build the entire RNG graph, both builds the KNN and refines it to RNG
                buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize);

                if (idmap != nullptr) {
                    std::unordered_map<SizeType, SizeType>::const_iterator iter;
                    for (SizeType i = 0; i < m_iGraphSize; i++) {
                        for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                            if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end())
                                m_pNeighborhoodGraph[i][j] = iter->second;
                        }
                    }
                }
            }
#else
            // Recursively split indices[first..last] with random hyperplanes
            // over the top-variance dimensions; ranges no larger than
            // m_iTPTLeafSize are emitted into `leaves`.
            template <typename T>
            void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves)
            {
                if (last - first <= m_iTPTLeafSize)
                {
                    leaves.emplace_back(first, last);
                }
                else
                {
                    std::vector<float> Mean(index->GetFeatureDim(), 0);

                    int iIteration = 100;
                    SizeType end = min(first + m_iSamples, last);
                    SizeType count = end - first + 1;
                    // calculate the mean of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            Mean[k] += v[k];
                        }
                    }
                    for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                    {
                        Mean[k] /= count;
                    }
                    std::vector<BasicResult> Variance;
                    Variance.reserve(index->GetFeatureDim());
                    for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
                    {
                        Variance.emplace_back(j, 0.0f);
                    }
                    // calculate the variance of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            float dist = v[k] - Mean[k];
                            Variance[k].Dist += dist*dist;
                        }
                    }
                    std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
                    // Keep the m_numTopDimensionTPTSplit highest-variance dims.
                    std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
                    std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
                    float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
                    for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
                    {
                        indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                        bestweight[i] = 0;
                    }
                    bestweight[0] = 1;
                    float bestmean = Mean[indexs[0]];

                    // Randomized search for the projection with largest spread.
                    std::vector<float> Val(count);
                    for (int i = 0; i < iIteration; i++)
                    {
                        float sumweight = 0;
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                            sumweight += weight[j] * weight[j];
                        }
                        sumweight = sqrt(sumweight);
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] /= sumweight;
                        }
                        float mean = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            Val[j] = 0;
                            const T* v = (const T*)index->GetSample(indices[first + j]);
                            for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                            {
                                Val[j] += weight[k] * v[indexs[k]];
                            }
                            mean += Val[j];
                        }
                        mean /= count;
                        float var = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            float dist = Val[j] - mean;
                            var += dist * dist;
                        }
                        if (var > bestvariance)
                        {
                            bestvariance = var;
                            bestmean = mean;
                            for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                            {
                                bestweight[j] = weight[j];
                            }
                        }
                    }
                    SizeType i = first;
                    SizeType j = last;
                    // decide which child one point belongs
                    while (i <= j)
                    {
                        float val = 0;
                        const T* v = (const T*)index->GetSample(indices[i]);
                        for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                        {
                            val += bestweight[k] * v[indexs[k]];
                        }
                        if (val < bestmean)
                        {
                            i++;
                        }
                        else
                        {
                            std::swap(indices[i], indices[j]);
                            j--;
                        }
                    }
                    // if all the points in the node are equal,equally split the node into 2
                    if ((i == first) || (i == last + 1))
                    {
                        i = (first + last + 1) / 2;
                    }

                    Mean.clear();
                    Variance.clear();
                    Val.clear();
                    indexs.clear();
                    weight.clear();
                    bestweight.clear();

                    PartitionByTptree<T>(index, indices, first, i - 1, leaves);
                    PartitionByTptree<T>(index, indices, i, last, leaves);
                }
            }

            // CPU path: build m_iTPTNumber randomized TP-trees in parallel and,
            // within each leaf, add all pairwise distances as KNN candidates.
            template <typename T>
            void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
            {
                COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
                std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
                std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());

                for (SizeType i = 0; i < m_iGraphSize; i++)
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                        (NeighborhoodDists)[i][j] = MaxDist;

                auto t1 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n");
#pragma omp parallel for schedule(dynamic)
                for (int i = 0; i < m_iTPTNumber; i++)
                {
                    // Stagger the threads so each tree reseeds rand() with a
                    // different clock value.
                    Sleep(i * 100); std::srand(clock());
                    for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
                    std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
                    PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
                    LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i);
                }
                LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n");
                auto t2 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

                for (int i = 0; i < m_iTPTNumber; i++)
                {
#pragma omp parallel for schedule(dynamic)
                    for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
                    {
                        SizeType start_index = TptreeLeafNodes[i][j].first;
                        SizeType end_index = TptreeLeafNodes[i][j].second;
                        if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100));
                        // All-pairs distances inside the leaf seed both
                        // endpoints' candidate lists.
                        for (SizeType x = start_index; x < end_index; x++)
                        {
                            for (SizeType y = x + 1; y <= end_index; y++)
                            {
                                SizeType p1 = TptreeDataIndices[i][x];
                                SizeType p2 = TptreeDataIndices[i][y];
                                float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                                if (idmap != nullptr) {
                                    p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                                    p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                                }
                                COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                                COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                            }
                        }
                    }
                    TptreeDataIndices[i].clear();
                    TptreeLeafNodes[i].clear();
                }
                TptreeDataIndices.clear();
                TptreeLeafNodes.clear();

                auto t3 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count());
            }
#endif

            // Full build: temporarily enlarge the neighborhood by
            // m_fNeighborhoodScale, build the initial KNN graph (skipped for
            // tiny graphs), then refine down to the final RNG.  Negative idmap
            // keys encode tail-slot relabels applied after refinement.
            template <typename T>
            void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n");

                m_iGraphSize = index->GetNumSamples();
                m_iNeighborhoodSize = (DimensionType)(ceil(m_iNeighborhoodSize * m_fNeighborhoodScale));
                m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);

                if (m_iGraphSize < 1000) {
                    // Small graph: refinement alone is sufficient.
                    RefineGraph<T>(index, idmap);
                    LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n");
                    return;
                }

                auto t1 = std::chrono::high_resolution_clock::now();
                BuildInitKNNGraph<T>(index, idmap);
                auto t2 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

                RefineGraph<T>(index, idmap);

                if (idmap != nullptr) {
                    for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                        if (iter->first < 0)
                        {
                            m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                        }
                }
                auto t3 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count());
            }

            // In-place refinement: m_iRefineIter-1 passes at the enlarged
            // neighborhood (CEF scaled up), then one final pass after shrinking
            // the neighborhood back to its configured size.
            template <typename T>
            void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                for (int iter = 0; iter < m_iRefineIter - 1; iter++)
                {
                    auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                    for (SizeType i = 0; i < m_iGraphSize; i++)
                    {
                        RefineNode<T>(index, i, false, false, (int)(m_iCEF * m_fCEFScale));
                        if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                    }
                    auto t2 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
                }

                // Restore the configured neighborhood size for the final pass.
                m_iNeighborhoodSize = (DimensionType)(m_iNeighborhoodSize / m_fNeighborhoodScale);

                if (m_iRefineIter > 0) {
                    auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                    for (SizeType i = 0; i < m_iGraphSize; i++)
                    {
                        RefineNode<T>(index, i, false, false, m_iCEF);
                        if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                    }
                    auto t2 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
                }
            }

            // Refine a subset of nodes into `newGraph` (created here if the
            // caller passed nullptr), remapping neighbor ids through
            // reverseIndices/idmap, and optionally stream the result to disk.
            template <typename T>
            ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
                std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                std::shared_ptr<NeighborhoodGraph> tmp;
                if (newGraph == nullptr) {
                    tmp = NeighborhoodGraph::CreateInstance(Type());
                    newGraph = tmp.get();
                }
                SizeType R = (SizeType)indices.size();
                newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
                newGraph->m_iGraphSize = R;
                newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < R; i++)
                {
                    if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100));
                    SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i];
                    COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
                    index->RefineSearchIndex(query, false);
                    RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);
                    std::unordered_map<SizeType, SizeType>::const_iterator iter;
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    {
                        if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                        if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
                    }
                    if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                        outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
                }
                if (output != nullptr) newGraph->SaveGraph(output);
                return ErrorCode::Success;
            }

            // Re-search around `node` with candidate budget CEF and rebuild its
            // neighbor list; optionally push `node` into its neighbors' lists.
            template <typename T>
            void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
            {
                COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
                index->RefineSearchIndex(query, searchDeleted);
                RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);
                if (updateNeighbors) {
                    // update neighbors
                    for (int j = 0; j <= CEF; j++)
                    {
                        BasicResult* item = query.GetResult(j);
                        if (item->VID < 0) break;
                        if (item->VID == node) continue;
                        InsertNeighbors(index, item->VID, node, item->Dist);
                    }
                }
            }

            inline std::uint64_t BufferSize() const
            {
                return m_pNeighborhoodGraph.BufferSize();
            }

            // The three LoadGraph overloads (stream, file path, memory buffer)
            // all delegate to Dataset::Load and refresh the cached dimensions.
            ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input, SizeType blockSize, SizeType capacity)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(input, blockSize, capacity)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ret;
            }

            ErrorCode LoadGraph(std::string sGraphFilename, SizeType blockSize, SizeType capacity)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename, blockSize, capacity)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ret;
            }

            ErrorCode LoadGraph(char* pGraphMemFile, SizeType blockSize, SizeType capacity)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile, blockSize, capacity)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ErrorCode::Success;
            }

            ErrorCode SaveGraph(std::string sGraphFilename) const
            {
                LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str());
                auto ptr = f_createIO();
                if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
                return SaveGraph(ptr);
            }

            // Binary layout: graph size, neighborhood size, then one row of
            // SizeType neighbor ids per node.
            ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const
            {
                IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize);
                IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize);

                for (int i = 0; i < m_iGraphSize; i++)
                    IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]);
                LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize);
                return ErrorCode::Success;
            }

            // Grow the adjacency table by `num` rows.
            inline ErrorCode AddBatch(SizeType num)
            {
                ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
                if (ret != ErrorCode::Success) return ret;

                m_iGraphSize += num;
                return ErrorCode::Success;
            }

            inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

            inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

            // Thread-safe single-cell update guarded by the per-row lock.
            void Update(SizeType row, DimensionType col, SizeType val)
            {
                std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
                m_pNeighborhoodGraph[row][col] = val;
            }

            inline void SetR(SizeType rows)
            {
                m_pNeighborhoodGraph.SetR(rows);
                m_iGraphSize = rows;
            }

            inline SizeType R() const { return m_iGraphSize; }

            inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }

            static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

        protected:
            // Graph structure
            SizeType m_iGraphSize;
            COMMON::Dataset<SizeType> m_pNeighborhoodGraph;
            FineGrainedLock m_dataUpdateLock;   // per-row locks used by Update()

        public:
            int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
            DimensionType m_iNeighborhoodSize;
            float m_fNeighborhoodScale, m_fCEFScale, m_fRNGFactor;
            int m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize;
        };
    }
}
#endif
DRB003-antidep2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
/* NOTE(review): this is a DataRaceBench "-yes" file, i.e. the defect is
   intentional — do not change the pragmas.  However, as transcribed here the
   second nest parallelizes only the inner j loop while i stays serial, which
   would serialize the documented anti-dependence on i; confirm against the
   upstream DRB003-antidep2-orig-yes.c source. */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc,char *argv[])
{
  int i;
  int j;
  int len = 20;
  double a[20][20];

  /* Initialize a[i][j] = i*len + j + 0.5 in parallel (independent writes). */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      a[i][j] = (i * len + j) + 0.5;
    }
  }

  /* Anti-dependence: a[i][j] reads a[i+1][j], which a later i-iteration
     overwrites. */
  for (i = 0; i <= len - 1 - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  for (i = 0; i <= len - 1; i += 1) {
    for (j = 0; j <= len - 1; j += 1) {
      printf("%lf",a[i][j]);
    }
  }
  printf("a[10][10]=%f\n",a[10][10]);
  return 0;
}
two_step_v_p_strategy.h
//
//   Project Name:        KratosPFEMFluidDynamicsApplication $
//   Last modified by:    $Author:                   AFranci $
//   Date:                $Date:                January 2016 $
//   Revision:            $Revision:                     0.0 $
//
//

#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H

#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"

#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"

#include "custom_utilities/solver_settings.h"

#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"

#include "pfem_fluid_dynamics_application_variables.h"

#include "v_p_strategy.h"

#include <stdio.h>
#include <math.h>

namespace Kratos
{

  ///@addtogroup PFEMFluidDynamicsApplication
  ///@{

  ///@name Kratos Globals
  ///@{

  ///@}
  ///@name Type Definitions
  ///@{

  ///@}

  ///@name  Enum's
  ///@{

  ///@}
  ///@name  Functions
  ///@{

  ///@}
  ///@name Kratos Classes
  ///@{

  /// Fractional-step (two-step) velocity-pressure solution strategy for PFEM fluids.
  /** Solves the momentum equation and the continuity (pressure) equation in an
   *  alternating iterative loop until both reach their tolerances, using one
   *  auxiliary linear strategy per equation. */
  template <class TSparseSpace, class TDenseSpace, class TLinearSolver>
  class TwoStepVPStrategy : public VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
  {
  public:
    ///@name Type Definitions
    ///@{
    KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);

    typedef VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;

    typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor: builds one Gauss-Seidel linear strategy for the momentum
    /// (velocity) system and one for the pressure system.
    /** @param rModelPart            fluid model part to solve on
     *  @param pVelocityLinearSolver linear solver for the momentum system
     *  @param pPressureLinearSolver linear solver for the pressure system
     *  @param ReformDofSet          if true, Clear() is called after each converged step
     *  @param VelTol / PresTol      relative convergence tolerances
     *  @param MaxPressureIterations base number of outer V-P iterations
     *  @param TimeOrder             BDF order (1 or 2) used by SetTimeCoefficients
     *  @param DomainSize            spatial dimension (2 or 3) */
    TwoStepVPStrategy(ModelPart &rModelPart,
                      typename TLinearSolver::Pointer pVelocityLinearSolver,
                      typename TLinearSolver::Pointer pPressureLinearSolver,
                      bool ReformDofSet = true,
                      double VelTol = 0.0001,
                      double PresTol = 0.0001,
                      int MaxPressureIterations = 1, // Only for predictor-corrector
                      unsigned int TimeOrder = 2,
                      unsigned int DomainSize = 2) : BaseType(rModelPart, pVelocityLinearSolver, pPressureLinearSolver, ReformDofSet, DomainSize),
                                                     mVelocityTolerance(VelTol),
                                                     mPressureTolerance(PresTol),
                                                     mMaxPressureIter(MaxPressureIterations),
                                                     mDomainSize(DomainSize),
                                                     mTimeOrder(TimeOrder),
                                                     mReformDofSet(ReformDofSet)
    {
      KRATOS_TRY;

      BaseType::SetEchoLevel(1);

      // Check that input parameters are reasonable and sufficient.
      this->Check();

      bool CalculateNormDxFlag = true;

      bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.

      // Additional Typedefs
      typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
      // NOTE(review): this local typedef shadows the class-level BaseType
      // (VPStrategy); below, BaseType::Pointer refers to SolvingStrategy::Pointer.
      typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

      //initializing fractional velocity solution step
      typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
      typename SchemeType::Pointer pScheme;

      typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
      pScheme.swap(Temp);

      //CONSTRUCTION OF VELOCITY
      BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
      /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */

      this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));

      this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());

      vel_build->SetCalculateReactionsFlag(false);

      // Pressure system uses a block builder-and-solver (elimination variant kept for reference).
      /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
      BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));

      this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));

      this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());

      pressure_build->SetCalculateReactionsFlag(false);

      KRATOS_CATCH("");
    }

    /// Destructor.
    virtual ~TwoStepVPStrategy() {}

    /// Validate registration of DELTA_TIME and run Check() on the base class
    /// and on every element; returns the first nonzero element error code.
    int Check() override
    {
      KRATOS_TRY;

      // Check elements and conditions in the model part
      int ierr = BaseType::Check();
      if (ierr != 0)
        return ierr;

      if (DELTA_TIME.Key() == 0)
        KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");

      ModelPart &rModelPart = BaseType::GetModelPart();

      const auto &r_current_process_info = rModelPart.GetProcessInfo();
      for (const auto &r_element : rModelPart.Elements())
      {
        ierr = r_element.Check(r_current_process_info);
        if (ierr != 0)
        {
          break;
        }
      }

      return ierr;

      KRATOS_CATCH("");
    }

    /// Fill BDF_COEFFICIENTS in the ProcessInfo for BDF1 or BDF2 time
    /// integration, accounting for a variable time-step ratio Rho = OldDt/Dt.
    void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
    {
      KRATOS_TRY;

      if (mTimeOrder == 2)
      {
        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
      }
      else if (mTimeOrder == 1)
      {
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);

        BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
      }
      // NOTE(review): mTimeOrder other than 1 or 2 silently leaves
      // BDF_COEFFICIENTS untouched.

      KRATOS_CATCH("");
    }

    /// Outer V-P iteration loop: alternately solve momentum and continuity,
    /// update topology between them, and stop when both converge (after at
    /// least 3 iterations) or the step is flagged as fixed.
    /** @return true if both sub-problems converged within the iteration budget. */
    bool SolveSolutionStep() override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
      double currentTime = rCurrentProcessInfo[TIME];
      double timeInterval = rCurrentProcessInfo[DELTA_TIME];
      bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
      unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT];
      bool converged = false;

      unsigned int maxNonLinearIterations = mMaxPressureIter;

      KRATOS_INFO("\nSolution with two_step_vp_strategy at t=") << currentTime << "s" << std::endl;

      // Grant extra iterations right after a time-step change and during
      // the first/second batches of 10 steps of the simulation.
      if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0)
      {
        maxNonLinearIterations *= 2;
      }
      if (currentTime < 10 * timeInterval)
      {
        if (BaseType::GetEchoLevel() > 1)
          std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
        maxNonLinearIterations *= 3;
      }
      if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
      {
        if (BaseType::GetEchoLevel() > 1)
          std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
        maxNonLinearIterations *= 2;
      }
      bool momentumConverged = true;
      bool continuityConverged = false;
      bool fixedTimeStep = false;

      double pressureNorm = 0;
      double velocityNorm = 0;

      this->SetBlockedAndIsolatedFlags();

      for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
      {
        momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);

        this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());

        if (fixedTimeStep == false)
        {
          continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
        }
        // Update the stress/strain state on the last iteration or once converged.
        if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
        {
          //double tensilStressSign = 1.0;
          // ComputeErrorL2Norm(tensilStressSign);
          this->UpdateStressStrain();
        }
        if ((continuityConverged && momentumConverged) && it > 2)
        {
          rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
          rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
          converged = true;

          KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;

          break;
        }
        if (fixedTimeStep == true)
        {
          break;
        }
      }

      if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        std::cout << "Convergence tolerance not reached." << std::endl;

      if (mReformDofSet)
        this->Clear();

      return converged;
    }

    void FinalizeSolutionStep() override
    {
    }

    void InitializeSolutionStep() override
    {
    }

    /// Re-initialize every element for the current step (in parallel) and
    /// recompute the temporal variables.
    void UpdateStressStrain() override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();

#pragma omp parallel
      {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
          /* itElem-> InitializeElementStrainStressState(); */
          itElem->InitializeSolutionStep(rCurrentProcessInfo);
        }
      }

      this->CalculateTemporalVariables();
    }

    /// Clear both auxiliary linear strategies (frees their system data).
    void Clear() override
    {
      mpMomentumStrategy->Clear();
      mpPressureStrategy->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    /// Propagate the echo level (decremented by one) to both sub-strategies.
    void SetEchoLevel(int Level) override
    {
      BaseType::SetEchoLevel(Level);
      int StrategyLevel = Level > 0 ? Level - 1 : 0;
      mpMomentumStrategy->SetEchoLevel(StrategyLevel);
      mpPressureStrategy->SetEchoLevel(StrategyLevel);
    }

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
      std::stringstream buffer;
      buffer << "TwoStepVPStrategy";
      return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream &rOStream) const override
    {
      rOStream << "TwoStepVPStrategy";
    }

    /// Print object's data.
    void PrintData(std::ostream &rOStream) const override {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

  protected:
    ///@name Protected Life Cycle
    ///@{

    ///@}
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Solve one momentum (fractional velocity) iteration and check its
    /// convergence; on the last iteration, possibly fix the time step.
    /** @param it            current outer iteration index
     *  @param maxIt         outer iteration budget
     *  @param fixedTimeStep out: set true when the step is rolled back
     *  @param velocityNorm  in/out: reference velocity norm (computed at it==0)
     *  @return true if the relative velocity error is below tolerance. */
    bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      int Rank = rModelPart.GetCommunicator().MyPID();
      bool ConvergedMomentum = false;
      double NormDv = 0;
      fixedTimeStep = false;
      // build momentum system and solve for fractional step velocity increment
      rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);

      if (it == 0)
      {
        mpMomentumStrategy->InitializeSolutionStep();
      }

      NormDv = mpMomentumStrategy->Solve();

      if (BaseType::GetEchoLevel() > 1 && Rank == 0)
        std::cout << "-------------- s o l v e d ! ------------------" << std::endl;

      if (it == 0)
      {
        velocityNorm = this->ComputeVelocityNorm();
      }
      // NOTE(review): if velocityNorm is zero this divides by zero — TODO confirm
      // upstream guarantees a nonzero reference norm.
      double DvErrorNorm = NormDv / velocityNorm;
      unsigned int iterationForCheck = 2;
      // Check convergence
      if (it == maxIt - 1)
      {
        KRATOS_INFO("Iteration") << it << " Final Velocity error: " << DvErrorNorm << std::endl;
        ConvergedMomentum = this->FixTimeStepMomentum(DvErrorNorm, fixedTimeStep);
      }
      else if (it > iterationForCheck)
      {
        KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
        ConvergedMomentum = this->CheckMomentumConvergence(DvErrorNorm, fixedTimeStep);
      }
      else
      {
        KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
      }

      if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;

      return ConvergedMomentum;
    }

    /// Solve one continuity (pressure) iteration and check its convergence.
    /** @param NormP in/out: reference pressure norm (computed at it==0). */
    bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      int Rank = rModelPart.GetCommunicator().MyPID();
      bool ConvergedContinuity = false;
      bool fixedTimeStep = false;
      double NormDp = 0;

      // 2. Pressure solution
      rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);

      if (it == 0)
      {
        mpPressureStrategy->InitializeSolutionStep();
      }

      NormDp = mpPressureStrategy->Solve();

      if (BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "The norm of pressure is: " << NormDp << std::endl;

      if (it == 0)
      {
        NormP = this->ComputePressureNorm();
      }

      double DpErrorNorm = NormDp / (NormP);

      // Check convergence
      if (it == (maxIt - 1))
      {
        KRATOS_INFO("Iteration") << it << " Final Pressure error: " << DpErrorNorm << std::endl;
        ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm, fixedTimeStep);
      }
      else
      {
        KRATOS_INFO("Iteration") << it << " Pressure error: " << DpErrorNorm << std::endl;
        ConvergedContinuity = this->CheckContinuityConvergence(DpErrorNorm, fixedTimeStep);
      }

      if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;

      return ConvergedContinuity;
    }

    /// Compute the relative velocity-increment error ||dv||/||v|| over all
    /// nodes (MPI-summed) and compare it against mVelocityTolerance.
    bool CheckVelocityConvergence(const double NormDv, double &errorNormDv) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      const int n_nodes = rModelPart.NumberOfNodes();

      double NormV = 0.00;
      errorNormDv = 0;

#pragma omp parallel for reduction(+ : NormV)
      for (int i_node = 0; i_node < n_nodes; ++i_node)
      {
        const auto it_node = rModelPart.NodesBegin() + i_node;
        const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY);
        for (unsigned int d = 0; d < 3; ++d)
        {
          NormV += r_vel[d] * r_vel[d];
        }
      }
      NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
      NormV = sqrt(NormV);

      // Guard against division by a (numerically) zero velocity field.
      const double zero_tol = 1.0e-12;
      errorNormDv = (NormV < zero_tol) ? NormDv : NormDv / NormV;

      if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
      {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
      }

      if (errorNormDv < mVelocityTolerance)
      {
        return true;
      }
      else
      {
        return false;
      }
    }

    /// Compute the relative pressure-increment error ||dp||/||p|| over all
    /// nodes (MPI-summed) and compare it against mPressureTolerance.
    bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      const int n_nodes = rModelPart.NumberOfNodes();

      NormP = 0.00;
      errorNormDp = 0;

      double tmp_NormP = 0.0;
#pragma omp parallel for reduction(+ : tmp_NormP)
      for (int i_node = 0; i_node < n_nodes; ++i_node)
      {
        const auto it_node = rModelPart.NodesBegin() + i_node;
        const double Pr = it_node->FastGetSolutionStepValue(PRESSURE);
        tmp_NormP += Pr * Pr;
      }
      NormP = tmp_NormP;
      NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
      NormP = sqrt(NormP);

      // Guard against division by a (numerically) zero pressure field.
      const double zero_tol = 1.0e-12;
      errorNormDp = (NormP < zero_tol) ? NormDp : NormDp / NormP;

      if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
      {
        std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << " The norm of pressure is: " << NormP << std::endl;
        std::cout << " Pressure error: " << errorNormDp << std::endl;
      }

      if (errorNormDp < mPressureTolerance)
      {
        return true;
      }
      else
        return false;
    }

    /// Final-iteration momentum check: on badly diverged/NaN errors, flag the
    /// step and roll velocity, pressure and acceleration back to step n.
    bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
      double currentTime = rCurrentProcessInfo[TIME];
      double timeInterval = rCurrentProcessInfo[DELTA_TIME];
      double minTolerance = 0.005;
      bool converged = false;

      if (currentTime < 10 * timeInterval)
      {
        minTolerance = 10;
      }

      // NOTE(review): (DvErrorNorm < 0 && DvErrorNorm > 0) is always false;
      // the NaN case is already caught by (DvErrorNorm != DvErrorNorm) — TODO confirm intent.
      if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) &&
          DvErrorNorm != 0 &&
          (DvErrorNorm != 1 || currentTime > timeInterval))
      {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
          std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
          fixedTimeStep = true;
#pragma omp parallel
          {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
              // Roll back to the previous step's nodal values.
              itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
              itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
              itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
          }
        }
      }
      else
      {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
          converged = true;
        }
      }
      return converged;
    }

    /// Mid-loop momentum check: on errors above ~1, flag bad convergence and
    /// roll nodal fields back to step n; otherwise test the tolerance.
    bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
      double currentTime = rCurrentProcessInfo[TIME];
      double timeInterval = rCurrentProcessInfo[DELTA_TIME];
      double minTolerance = 0.99999;
      bool converged = false;

      // NOTE(review): same always-false (x<0 && x>0) subexpression as in FixTimeStepMomentum.
      if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) &&
          DvErrorNorm != 0 &&
          (DvErrorNorm != 1 || currentTime > timeInterval))
      {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
#pragma omp parallel
        {
          ModelPart::NodeIterator NodeBegin;
          ModelPart::NodeIterator NodeEnd;
          OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
          for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
          {
            // Roll back to the previous step's nodal values.
            itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
            itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
            itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
          }
        }
      }
      else
      {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
          converged = true;
        }
      }
      return converged;
    }

    /// Final-iteration continuity check: on badly diverged/NaN pressure error,
    /// fix the time step and roll nodal fields back; otherwise test tolerance.
    bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
      double currentTime = rCurrentProcessInfo[TIME];
      double timeInterval = rCurrentProcessInfo[DELTA_TIME];
      double minTolerance = 0.01;
      bool converged = false;

      if (currentTime < 10 * timeInterval)
      {
        minTolerance = 10;
      }

      // NOTE(review): same always-false (x<0 && x>0) subexpression as above.
      if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) &&
          DvErrorNorm != 0 &&
          (DvErrorNorm != 1 || currentTime > timeInterval))
      {
        fixedTimeStep = true;
        // rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
        if (DvErrorNorm > 0.9999)
        {
          rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
          std::cout << " BAD PRESSURE CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.1" << std::endl;
          std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
          fixedTimeStep = true;
#pragma omp parallel
          {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
              // Roll back to the previous step's nodal values.
              itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
              itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
              itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
          }
        }
      }
      else if (DvErrorNorm < mPressureTolerance)
      {
        converged = true;
        fixedTimeStep = false;
      }
      rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
      return converged;
    }

    /// Mid-loop continuity check: tolerance test only, no rollback.
    bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep) override
    {
      ModelPart &rModelPart = BaseType::GetModelPart();
      ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
      bool converged = false;

      if (DvErrorNorm < mPressureTolerance)
      {
        converged = true;
        fixedTimeStep = false;
      }
      rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
      return converged;
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    // Relative tolerance for the velocity error.
    double mVelocityTolerance;

    // Relative tolerance for the pressure error.
    double mPressureTolerance;

    // Base number of outer V-P iterations per step.
    unsigned int mMaxPressureIter;

    // Spatial dimension (2 or 3).
    unsigned int mDomainSize;

    // BDF order for time integration (1 or 2).
    unsigned int mTimeOrder;

    // If true, Clear() is invoked at the end of each converged step.
    bool mReformDofSet;

    // Fractional step index.
    /*  1 : Momentum step (calculate fractional step velocity)
     * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
     * 4 : Pressure step
     * 5 : Computation of projections
     * 6 : End of step velocity
     */
    //    unsigned int mStepId;

    /// Scheme for the solution of the momentum equation
    StrategyPointerType mpMomentumStrategy;

    /// Scheme for the solution of the mass equation
    StrategyPointerType mpPressureStrategy;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // NOTE(review): declared protected to forbid copying; body has no return
    // statement, so it must never actually be called.
    TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) {}

    /// Copy constructor.
    TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}

    ///@}

  }; /// Class TwoStepVPStrategy

  ///@}
  ///@name Type Definitions
  ///@{

  ///@}

  ///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
broadcast_reduce-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2015-2017 by Contributors
 * \file broadcast_reduce_kernel.h
 * \brief Function definition of elementwise unary operators
 */
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_

#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"

namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;

// Maximum tensor rank supported by these broadcast/reduce kernels.
const int MAX_DIM = 5;

// Row-major strides for `shape`; broadcast axes (extent 1) get stride 0 so
// any coordinate on that axis maps onto the single stored element.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

// Unravel flat index `idx` over `shape` and simultaneously dot the resulting
// coordinate with two stride vectors, producing flat offsets *j and *k.
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stridej, const Shape<ndim>& stridek, int* j, int* k) {
  *j = 0;
  *k = 0;
  #pragma unroll
  for (int i = ndim-1, idx_t = idx; i >=0; --i) {
    const int tmp = idx_t / shape[i];
    const int coord = idx_t - tmp*shape[i];
    *j += coord*stridej[i];
    *k += coord*stridek[i];
    idx_t = tmp;
  }
}

// Convert a flat row-major index into an ndim coordinate for `shape`.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >=0; --i) {
    int tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}

// Convert a coordinate back to a flat index for `shape`; axes of extent 1
// contribute 0, implementing broadcast-aware addressing.
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > 1) * coord[i];
  }
  return ret;
}

// Describe the axes being reduced: pack the extents of axes where `small`
// and `big` differ into *dims (front-aligned) with their strides in *stride.
// Returns the number of reduced axes.
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big,
  Shape<ndim>* dims, Shape<ndim>* stride) {
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  #pragma unroll
  for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}

// Unravel `idx` over `shape` and dot the coordinate with `stride` in one pass.
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >=0; --i) {
    int tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

// Inner product of a coordinate with a stride vector (flat offset).
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i)
    ret += coord[i] * stride[i];
  return ret;
}

// Write or accumulate `src` into *dst depending on the request type.
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  if (addto) {
    *dst += src;
  } else {
    *dst = src;
  }
}

// Compute one output element of a binary broadcast op:
// out[idx] (=|+=) OP(lhs, rhs) with lhs/rhs indexed via broadcast raveling.
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const int idx, const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs,
                                             DType* out,
                                             const Shape<ndim>& lshape,
                                             const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> coord = unravel(idx, oshape);
  const int j = ravel(coord, lshape);
  const int k = ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}

// Reduce M elements of `big` into small[idx] using Reducer, mapping each
// element through OP first. `residual` carries Reducer's compensation state.
template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
                                       const DType* __restrict big, DType *small,
                                       const Shape<ndim>& bshape,
                                       const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape,
                                       const Shape<ndim>& rstride) {
  Shape<ndim> coord = unravel(idx, sshape);
  int j = ravel(coord, bshape);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    coord = unravel(k, rshape);
    Reducer::Reduce(val, OP::Map(big[j + dot(coord, rstride)]), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}

#ifdef __CUDACC__
// GPU implementations live in the companion .cuh file.
#include "broadcast_reduce-inl.cuh"

#else

// Sequential CPU driver: evaluate the binary broadcast op for all N outputs.
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const int N, const bool addto, const DType *lhs,
                              const DType *rhs, DType *out,
                              const Shape<ndim> lshape, const Shape<ndim> rshape,
                              const Shape<ndim> oshape) {
  for (int idx = 0; idx < N; ++idx) {
    binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
  }
}

// TBlob-level entry point for the CPU binary broadcast kernel.
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  if (req == kNullOp) return;
  int N = out.shape_.Size();
  binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(),
    rhs.dptr<DType>(), out.dptr<DType>(), lhs.shape_.get<ndim>(),
    rhs.shape_.get<ndim>(), out.shape_.get<ndim>());
}

// OpenMP-parallel CPU driver: one reduction of M inputs per output element.
template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const int N, const int M, const bool addto,
                        const DType *big, DType *small,
                        const Shape<ndim> bshape, const Shape<ndim> sshape,
                        const Shape<ndim> rshape, const Shape<ndim> rstride) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small,
      bshape, sshape, rshape, rstride);
  }
}

// TBlob-level CPU reduce: derive the reduced-axis geometry with diff(), then
// run the parallel reduction driver. `workspace` is unused on CPU.
template<typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size(), M = rshape.Size();
  seq_reduce_compute<Reducer, ndim, DType, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
    big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
}

// CPU reductions need no scratch memory.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big) {
  return 0;
}

// CPU reductions need no scratch memory (three-input variant).
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big, const TShape& lhs, const TShape& rhs) {
  return 0;
}

// Fused reduce of OP1(big, OP2(lhs, rhs)) into small[idx]; each of the three
// inputs is addressed through its own broadcast shape/stride pair.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
                                       const DType* __restrict big,
                                       const DType* __restrict lhs,
                                       const DType* __restrict rhs,
                                       DType *small,
                                       const Shape<ndim>& big_shape,
                                       const Shape<ndim>& lhs_shape0,
                                       const Shape<ndim>& rhs_shape0,
                                       const Shape<ndim>& small_shape,
                                       const Shape<ndim>& rshape,
                                       const Shape<ndim>& lhs_shape,
                                       const Shape<ndim>& rhs_shape,
                                       const Shape<ndim>& rstride,
                                       const Shape<ndim>& lhs_stride,
                                       const Shape<ndim>& rhs_stride) {
  Shape<ndim> coord = unravel(idx, small_shape);
  const int idx_big0 = ravel(coord, big_shape);
  const int idx_lhs0 = ravel(coord, lhs_shape0);
  const int idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    Shape<ndim> coord_big = unravel(k, rshape);
    int idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    int idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    int idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}

// OpenMP-parallel CPU driver for the fused three-input reduction.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const int N, const int M, const bool addto,
                        const DType *big, const DType *lhs, const DType *rhs,
                        DType *small,
                        const Shape<ndim> big_shape, const Shape<ndim> small_shape,
                        const Shape<ndim> rshape, const Shape<ndim> rstride,
                        const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
                        const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
                        const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
      big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape,
      rstride, lhs_stride, rhs_stride);
  }
}

// TBlob-level CPU entry point for the fused reduce of OP1(big, OP2(lhs, rhs)).
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big,
            const TBlob& lhs, const TBlob& rhs) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size();
  int M = rshape.Size();
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
    N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(),
    small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(),
    rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride,
    lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}

#endif
}  // namespace broadcast
}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
lastprivate-conditional-4.c
/* Compile-time test for the OpenMP 5.0 `lastprivate(conditional:)` clause:
   only the last iteration that actually executes the conditional assignment
   determines the value written back to the original list items.
   `bar` is defined elsewhere (external linkage); its result gates the writes
   so the compiler cannot prove which iteration assigns last. */
int x = 6, w = 8;
int bar (int);

void
foo ()
{
  int y = 5, i;
  /* x is firstprivate on the teams region; y and w stay shared. */
  #pragma omp teams num_teams(1) firstprivate (x) shared (y) shared (w)
  {
    int z = 7;
    /* All four variables enter the loop firstprivate and leave it via
       conditional lastprivate: a mix of teams-firstprivate (x),
       teams-shared (y, w) and block-local (z) list items. */
    #pragma omp parallel for firstprivate (x, y, z, w) lastprivate (conditional: x, y, z, w)
    for (i = 0; i < 64; i++)
      if (bar (i))
        {
          x = i;
          y = i + 1;
          z = i + 2;
          w = i + 3;
        }
    /* Consume y and z so their lastprivate values are observably used. */
    bar (y);
    bar (z);
  }
}
matrix.h
//================================================================================== // BSD 2-Clause License // // Copyright (c) 2014-2022, NJIT, Duality Technologies Inc. and other contributors // // All rights reserved. // // Author TPOC: contact@openfhe.org // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//==================================================================================

/* This code provides a templated matrix implementation */

#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H

#include <cmath>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <utility>

#include "lattice/lat-hal.h"
#include "math/distrgen.h"
#include "math/nbtheory.h"
#include "utils/inttypes.h"
#include "utils/memory.h"
#include "utils/utilities.h"

namespace lbcrypto {

// Forward declaration
class Field2n;

/**
 * @brief Dense row-major matrix of elements (integers or ring elements).
 * Elements are created through a caller-supplied zero allocator (alloc_func),
 * which lets the class hold lattice elements that require parameters to
 * construct.  NOTE(review): rows/cols are stored as uint32_t (see the private
 * section and the cereal save/load below) although the public API uses size_t;
 * dimensions above 2^32-1 would be silently narrowed — confirm callers never
 * exceed that.
 */
template <class Element>
class Matrix : public Serializable {
public:
    typedef std::vector<std::vector<Element>> data_t;
    typedef std::vector<Element> data_row_t;
    typedef std::function<Element(void)> alloc_func;

    /**
   * Constructor that initializes matrix values using a zero allocator
   *
   * @param &allocZero lambda function for zero initialization.
   * @param &rows number of rows.
   * @param &cols number of columns.
   */
    Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    // TODO: add Clear();

    /**
   * Constructor that initializes matrix values using a distribution generation
   * allocator
   *
   * @param &allocZero lambda function for zero initialization (used for
   * initializing derived matrix objects)
   * @param &rows number of rows.
   * @param &cols number of columns.
   * @param &allocGen lambda function for initialization using a distribution
   * generator.
   */
    Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);

    /**
   * Constructor of an empty matrix.
   * SetSize must be called on this matrix to use it.
   * SetAllocator needs to be called if 0 is passed to this constructor.
   * This mostly exists to support deserializing.
   *
   * @param &allocZero lambda function for zero initialization.
   */
    explicit Matrix(alloc_func allocZero = 0) : data(), rows(0), cols(0), allocZero(allocZero) {}

    /**
   * Set the size of a matrix; elements are zeroed out.
   * Only valid on an empty (0x0) matrix — throws otherwise.
   *
   * @param rows number of rows
   * @param cols number of columns
   */
    void SetSize(size_t rows, size_t cols) {
        if (this->rows != 0 || this->cols != 0) {
            OPENFHE_THROW(not_available_error, "You cannot SetSize on a non-empty matrix");
        }
        this->rows = rows;
        this->cols = cols;
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
   * SetAllocator - set the function to allocate a zero;
   * basically only required for the deserializer
   *
   * @param allocZero zero-allocation lambda
   */
    void SetAllocator(alloc_func allocZero) {
        this->allocZero = allocZero;
    }

    /**
   * Copy constructor (deep copy of the element data)
   *
   * @param &other the matrix object to be copied
   */
    Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
        deepCopyData(other.data);
    }

    /**
   * Assignment operator
   *
   * @param &other the matrix object whose values are to be copied
   * @return the resulting matrix
   */
    Matrix<Element>& operator=(const Matrix<Element>& other);

    /**
   * In-place change of the current matrix to a matrix of all ones
   *
   * @return the resulting matrix
   */
    Matrix<Element>& Ones() {
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                data[row][col] = 1;
            }
        }
        return *this;
    }

    /**
   * In-place modulo reduction
   *
   * @return the resulting matrix
   */
    Matrix<Element>& ModEq(const Element& modulus);

    /**
   * In-place modular subtraction
   *
   * @return the resulting matrix
   */
    Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element& modulus);

    /**
   * Fill matrix using the same element
   *
   * @param &val the element the matrix is filled by
   *
   * @return the resulting matrix
   */
    Matrix<Element>& Fill(const Element& val);

    /**
   * In-place change of the current matrix to the identity matrix
   *
   * @return the resulting matrix
   */
    Matrix<Element>& Identity() {
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                if (row == col) {
                    data[row][col] = 1;
                }
                else {
                    data[row][col] = 0;
                }
            }
        }
        return *this;
    }

    /**
   * Sets the first row to be powers of the base (block-diagonal gadget matrix).
   * Overload for all element types except multiprecision DCRT polynomials.
   * Assumes cols is a multiple of rows (k = cols / rows digits per row).
   *
   * @param base is the base the digits of the matrix are represented in
   * @return the resulting matrix
   */
    template <typename T = Element, typename std::enable_if<!std::is_same<T, M2DCRTPoly>::value && !std::is_same<T, M4DCRTPoly>::value && !std::is_same<T, M6DCRTPoly>::value, bool>::type = true>
    Matrix<T> GadgetVector(int64_t base = 2) const {
        Matrix<T> g(allocZero, rows, cols);
        auto base_matrix = allocZero();
        size_t k = cols / rows;
        base_matrix = base;
        g(0, 0) = 1;
        // First row: 1, base, base^2, ..., base^(k-1).
        for (size_t i = 1; i < k; i++) {
            g(0, i) = g(0, i - 1) * base_matrix;
        }
        // Remaining rows: shifted copies of the first row along the block diagonal.
        for (size_t row = 1; row < rows; row++) {
            for (size_t i = 0; i < k; i++) {
                g(row, i + row * k) = g(0, i);
            }
        }
        return g;
    }

    /**
   * GadgetVector overload for multiprecision DCRT polynomial elements: powers
   * of the base are placed per CRT tower via SetElementAtIndex.
   * NOTE(review): digitCount is derived from the modulus of tower 0 only —
   * presumably all towers have moduli of similar size; confirm.
   */
    template <typename T = Element, typename std::enable_if<std::is_same<T, M2DCRTPoly>::value || std::is_same<T, M4DCRTPoly>::value || std::is_same<T, M6DCRTPoly>::value, bool>::type = true>
    Matrix<T> GadgetVector(int64_t base = 2) const {
        Matrix<T> g(allocZero, rows, cols);
        auto base_matrix = allocZero();
        base_matrix = base;
        size_t bk = 1;
        auto params = g(0, 0).GetParams()->GetParams();
        uint64_t digitCount = (uint64_t)ceil(log2(params[0]->GetModulus().ConvertToDouble()) / log2(base));
        for (size_t k = 0; k < digitCount; k++) {
            for (size_t i = 0; i < params.size(); i++) {
                NativePoly temp(params[i]);
                temp = bk;
                g(0, k + i * digitCount).SetElementAtIndex(i, std::move(temp));
            }
            bk *= base;
        }
        size_t kCols = cols / rows;
        for (size_t row = 1; row < rows; row++) {
            for (size_t i = 0; i < kCols; i++) {
                g(row, i + row * kCols) = g(0, i);
            }
        }
        return g;
    }

    /**
   * Computes the infinity norm.
   * This overload (double/int/int64_t/Field2n) is intentionally unsupported
   * and throws, since those scalar types have no Norm() member.
   *
   * @return the norm in double format
   */
    template <typename T = Element, typename std::enable_if<std::is_same<T, double>::value || std::is_same<T, int>::value || std::is_same<T, int64_t>::value || std::is_same<T, Field2n>::value, bool>::type = true>
    double Norm() const {
        OPENFHE_THROW(not_available_error, "Norm not defined for this type");
    }

    /**
   * Computes the infinity norm: the maximum of Element::Norm() over all
   * entries.
   *
   * @return the norm in double format
   */
    template <typename T = Element, typename std::enable_if<!std::is_same<T, double>::value && !std::is_same<T, int>::value && !std::is_same<T, int64_t>::value && !std::is_same<T, Field2n>::value, bool>::type = true>
    double Norm() const {
        double retVal = 0.0;
        double locVal = 0.0;
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                locVal = data[row][col].Norm();
                if (locVal > retVal) {
                    retVal = locVal;
                }
            }
        }
        return retVal;
    }

    /**
   * Matrix multiplication
   *
   * @param &other the multiplier matrix
   * @return the result of multiplication
   */
    Matrix<Element> Mult(Matrix<Element> const& other) const;

    /**
   * Operator for matrix multiplication
   *
   * @param &other the multiplier matrix
   * @return the result of multiplication
   */
    Matrix<Element> operator*(Matrix<Element> const& other) const {
        return Mult(other);
    }

    /**
   * Multiplication of matrix by a scalar (column-parallel via OpenMP)
   *
   * @param &other the multiplier element
   * @return the result of multiplication
   */
    Matrix<Element> ScalarMult(Element const& other) const {
        Matrix<Element> result(*this);
#pragma omp parallel for
        for (size_t col = 0; col < result.cols; ++col) {
            for (size_t row = 0; row < result.rows; ++row) {
                result.data[row][col] = result.data[row][col] * other;
            }
        }
        return result;
    }

    /**
   * Operator for scalar multiplication
   *
   * @param &other the multiplier element
   * @return the result of multiplication
   */
    Matrix<Element> operator*(Element const& other) const {
        return ScalarMult(other);
    }

    /**
   * Equality check: dimensions and all entries must match
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
    bool Equal(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            return false;
        }
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                if (data[i][j] != other.data[i][j]) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
   * Operator for equality check
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
    bool operator==(Matrix<Element> const& other) const {
        return Equal(other);
    }

    /**
   * Operator for non-equality check
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
    bool operator!=(Matrix<Element> const& other) const {
        return !Equal(other);
    }

    /**
   * Get property to access the data as a vector of vectors
   *
   * @return the data as vector of vectors
   */
    const data_t& GetData() const {
        return data;
    }

    /**
   * Get property to access the number of rows in the matrix
   *
   * @return the number of rows
   */
    size_t GetRows() const {
        return rows;
    }

    /**
   * Get property to access the number of columns in the matrix
   *
   * @return the number of columns
   */
    size_t GetCols() const {
        return cols;
    }

    /**
   * Get property to access the zero allocator for the matrix
   *
   * @return the lambda function corresponding to the element zero allocator
   */
    alloc_func GetAllocator() const {
        return allocZero;
    }

    /**
   * Sets the evaluation or coefficient representation for all ring elements
   * that support the SetFormat method
   *
   * @param &format the enum value corresponding to coefficient or evaluation
   * representation
   */
    void SetFormat(Format format);

    /**
   * Matrix addition; throws on dimension mismatch
   *
   * @param &other the matrix to be added
   * @return the resulting matrix
   */
    Matrix<Element> Add(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Addition operands have incompatible dimensions");
        }
        Matrix<Element> result(*this);
#pragma omp parallel for
        for (size_t j = 0; j < cols; ++j) {
            for (size_t i = 0; i < rows; ++i) {
                result.data[i][j] += other.data[i][j];
            }
        }
        return result;
    }

    /**
   * Operator for matrix addition
   *
   * @param &other the matrix to be added
   * @return the resulting matrix
   */
    Matrix<Element> operator+(Matrix<Element> const& other) const {
        return this->Add(other);
    }

    /**
   * Operator for in-place addition
   *
   * @param &other the matrix to be added
   * @return the resulting matrix (same object)
   */
    Matrix<Element>& operator+=(Matrix<Element> const& other);

    /**
   * Matrix subtraction; throws on dimension mismatch
   *
   * @param &other the matrix to be subtracted
   * @return the resulting matrix
   */
    Matrix<Element> Sub(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Subtraction operands have incompatible dimensions");
        }
        Matrix<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
        for (size_t j = 0; j < cols; ++j) {
            for (size_t i = 0; i < rows; ++i) {
                result.data[i][j] = data[i][j] - other.data[i][j];
            }
        }
        return result;
    }

    /**
   * Operator for matrix subtraction
   *
   * @param &other the matrix to be subtracted
   * @return the resulting matrix
   */
    Matrix<Element> operator-(Matrix<Element> const& other) const {
        return this->Sub(other);
    }

    /**
   * Operator for in-place matrix subtraction
   *
   * @param &other the matrix to be subtracted
   * @return the resulting matrix (same object)
   */
    Matrix<Element>& operator-=(Matrix<Element> const& other);

    /**
   * Matrix transposition
   *
   * @return the resulting matrix
   */
    Matrix<Element> Transpose() const;

    // YSP The signature of this method needs to be changed in the future
    /**
   * Matrix determinant - found using Laplace formula with complexity O(d!),
   * where d is the dimension
   *
   * @param *result where the result is stored
   */
    void Determinant(Element* result) const;
    // Element Determinant() const;

    /**
   * Cofactor matrix - the matrix of determinants of the minors A_{ij}
   * multiplied by -1^{i+j}
   *
   * @return the cofactor matrix for the given matrix
   */
    Matrix<Element> CofactorMatrix() const;

    /**
   * Add rows to bottom of the matrix
   *
   * @param &other the matrix to be added to the bottom of current matrix
   * @return the resulting matrix
   */
    Matrix<Element>& VStack(Matrix<Element> const& other);

    /**
   * Add columns to the right of the matrix
   *
   * @param &other the matrix to be added to the right of current matrix
   * @return the resulting matrix
   */
    Matrix<Element>& HStack(Matrix<Element> const& other);

    /**
   * Matrix indexing operator - writeable instance of the element.
   * No bounds checking is performed.
   *
   * @param &row row index
   * @param &col column index
   * @return the element at the index
   */
    Element& operator()(size_t row, size_t col) {
        return data[row][col];
    }

    /**
   * Matrix indexing operator - read-only instance of the element.
   * No bounds checking is performed.
   *
   * @param &row row index
   * @param &col column index
   * @return the element at the index
   */
    Element const& operator()(size_t row, size_t col) const {
        return data[row][col];
    }

    /**
   * Matrix row extractor (returns a copy as a 1 x cols matrix)
   *
   * @param &row row index
   * @return the row at the index
   */
    Matrix<Element> ExtractRow(size_t row) const {
        Matrix<Element> result(this->allocZero, 1, this->cols);
        int i = 0;
        for (auto& elem : this->GetData()[row]) {
            result(0, i) = elem;
            i++;
        }
        return result;
        // return *this;
    }

    /**
   * Matrix column extractor (returns a copy as a rows x 1 matrix)
   *
   * @param &col col index
   * @return the col at the index
   */
    Matrix<Element> ExtractCol(size_t col) const {
        Matrix<Element> result(this->allocZero, this->rows, 1);
        for (size_t i = 0; i < this->rows; i++) {
            result(i, 0) = data[i][col];
        }
        return result;
        // return *this;
    }

    /**
   * Matrix rows extractor in a range from row_start to row_end; INCLUSIVE on
   * both ends (the result has row_end - row_start + 1 rows).
   *
   * @param &row_start &row_end row indices
   * @return the rows in the range delimited by indices inclusive
   */
    inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const {
        Matrix<Element> result(this->allocZero, row_end - row_start + 1, this->cols);
        for (usint row = row_start; row < row_end + 1; row++) {
            int i = 0;
            for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
                result(row - row_start, i) = *elem;
                i++;
            }
        }
        return result;
    }

    // Stream output: prints the matrix as bracketed rows, one row per line.
    friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) {
        os << "[ ";
        for (size_t row = 0; row < m.GetRows(); ++row) {
            os << "[ ";
            for (size_t col = 0; col < m.GetCols(); ++col) {
                os << m(row, col) << " ";
            }
            os << "]\n";
        }
        os << " ]\n";
        return os;
    }

    /**
   * Call switch format for each (ring) element
   *
   */
    void SwitchFormat();

// Defines a SwitchFormat specialization that throws, for matrices whose
// element type is not a lattice Element (e.g. scalar matrices).
#define NOT_AN_ELEMENT_MATRIX(T)                                       \
    template <>                                                        \
    void Matrix<T>::SwitchFormat() {                                   \
        OPENFHE_THROW(not_available_error, "Not a matrix of Elements"); \
    }

    /*
   * Multiply the matrix by a vector whose elements are all 1's. This causes
   * the elements of each row of the matrix to be added and placed into the
   * corresponding position in the output vector.
   */
    Matrix<Element> MultByUnityVector() const;

    /*
   * Multiply the matrix by a vector of random 1's and 0's, which is the same as
   * adding select elements in each row together. Return a vector that is a rows
   * x 1 matrix.
   */
    Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;

    // cereal serialization: data plus dimensions (no allocator — see load()).
    template <class Archive>
    void save(Archive& ar, std::uint32_t const version) const {
        ar(::cereal::make_nvp("d", data));
        ar(::cereal::make_nvp("r", rows));
        ar(::cereal::make_nvp("c", cols));
    }

    template <class Archive>
    void load(Archive& ar, std::uint32_t const version) {
        if (version > SerializedVersion()) {
            OPENFHE_THROW(deserialize_error, "serialized object version " + std::to_string(version) +
                                                 " is from a later version of the library");
        }
        ar(::cereal::make_nvp("d", data));
        ar(::cereal::make_nvp("r", rows));
        ar(::cereal::make_nvp("c", cols));
        // users will need to SetAllocator for any newly deserialized matrix
    }

    std::string SerializedObjectName() const {
        return "Matrix";
    }
    static uint32_t SerializedVersion() {
        return 1;
    }

private:
    data_t data;          // row-major storage: data[row][col]
    uint32_t rows;        // NOTE(review): narrower than the size_t public API
    uint32_t cols;
    alloc_func allocZero; // produces a zero Element; not serialized
    // mutable int NUM_THREADS = 1;

    // deep copy of data - used for copy constructor
    void deepCopyData(data_t const& src) {
        data.clear();
        data.resize(src.size());
        for (size_t row = 0; row < src.size(); ++row) {
            for (auto elem = src[row].begin(); elem != src[row].end(); ++elem) {
                data[row].push_back(*elem);
            }
        }
    }
};

/**
 * Operator for scalar multiplication of matrix (scalar on the left)
 *
 * @param &e element
 * @param &M matrix
 * @return the resulting matrix
 */
template <class Element>
Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
    return M.ScalarMult(e);
}
/**
 * Generates a matrix of rotations. See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template <typename Element>
Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat);

/**
 * Each element becomes a square matrix with columns of that element's
 * rotations in coefficient form. See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template <typename Element>
Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat);

/**
 * Stream output operator
 *
 * @param &os stream
 * @param &m matrix to be outputted
 * @return the chained stream
 */
template <class Element>
std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);

/**
 * Gives the Cholesky decomposition of the input matrix.
 * The assumption is that covariance matrix does not have large coefficients
 * because it is formed by discrete gaussians e and s; this implies int32_t can
 * be used. This algorithm can be further improved - see the Darmstadt paper
 * section 4.4 http://eprint.iacr.org/2013/297.pdf
 *
 * @param &input the matrix for which the Cholesky decomposition is to be
 * computed
 * @return the resulting matrix of floating-point numbers
 */
Matrix<double> Cholesky(const Matrix<int32_t>& input);

// In-place variant: writes the decomposition into a caller-provided matrix.
void Cholesky(const Matrix<int32_t>& input, Matrix<double>& result);

/**
 * Convert a matrix of integers from BigInteger to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger>& input, const BigInteger& modulus);

/**
 * Convert a matrix of BigVector to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigVector>& input, const BigInteger& modulus);

/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension
 * n (consumes n consecutive input rows per output element)
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n,
                                       const std::shared_ptr<typename Element::Params> params);

// Emits the explicit specialization of SplitInt64IntoElements for element type T.
#define SPLIT64_FOR_TYPE(T)                                                           \
    template <>                                                                       \
    Matrix<T> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n,          \
                                     const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                  \
        size_t rows = other.GetRows() / n;                                            \
        Matrix<T> result(zero_alloc, rows, 1);                                        \
        for (size_t row = 0; row < rows; ++row) {                                     \
            std::vector<int64_t> values(n);                                           \
            for (size_t i = 0; i < n; ++i)                                            \
                values[i] = other(row * n + i, 0);                                    \
            result(row, 0) = values;                                                  \
        }                                                                             \
        return result;                                                                \
    }

/**
 * Another method for splitting a vector of int32_t into a vector of ring
 * elements with ring dimension n (reads n values per input ROW rather than
 * n consecutive rows)
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n,
                                          const std::shared_ptr<typename Element::Params> params);

// Emits the explicit specialization of SplitInt32AltIntoElements for element type T.
#define SPLIT32ALT_FOR_TYPE(T)                                                           \
    template <>                                                                          \
    Matrix<T> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n,          \
                                        const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                     \
        size_t rows = other.GetRows();                                                   \
        Matrix<T> result(zero_alloc, rows, 1);                                           \
        for (size_t row = 0; row < rows; ++row) {                                        \
            std::vector<int32_t> values(n);                                              \
            for (size_t i = 0; i < n; ++i)                                               \
                values[i] = other(row, i);                                               \
            result(row, 0) = values;                                                     \
        }                                                                                \
        return result;                                                                   \
    }

/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension
 * n (row-wise variant of SplitInt64IntoElements)
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n,
                                          const std::shared_ptr<typename Element::Params> params);

// Emits the explicit specialization of SplitInt64AltIntoElements for element type T.
#define SPLIT64ALT_FOR_TYPE(T)                                                           \
    template <>                                                                          \
    Matrix<T> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n,          \
                                        const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                     \
        size_t rows = other.GetRows();                                                   \
        Matrix<T> result(zero_alloc, rows, 1);                                           \
        for (size_t row = 0; row < rows; ++row) {                                        \
            std::vector<int64_t> values(n);                                              \
            for (size_t i = 0; i < n; ++i)                                               \
                values[i] = other(row, i);                                               \
            result(row, 0) = values;                                                     \
        }                                                                                \
        return result;                                                                   \
    }

}  // namespace lbcrypto

#endif  // LBCRYPTO_MATH_MATRIX_H
for_misc_messages.c
// clang -verify test for '#pragma omp for' diagnostics. The expected-* comments
// below are directives consumed by -verify; their text and relative (@+N) line
// offsets are semantically significant and must not be altered or shifted.
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -triple x86_64-unknown-unknown -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -triple x86_64-unknown-unknown -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -triple x86_64-unknown-unknown -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -triple x86_64-unknown-unknown -verify=expected,omp50 %s -Wuninitialized

// -Wuninitialized still fires inside an OpenMP loop body.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for foo

// A for statement must immediately follow the directive.
void test_no_clause() {
  int i;
#pragma omp for
  for (i = 0; i < 16; ++i)
    ;

  // expected-error@+2 {{statement after '#pragma omp for' must be a for loop}}
#pragma omp for
  ++i;
}

// Branches may not enter or leave the structured block of the region.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp for
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Clauses that are not valid on 'omp for'.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for foo bar
  for (i = 0; i < 16; ++i)
    ;

// At one time, this failed an assert.
// expected-error@+1 {{unexpected OpenMP clause 'num_teams' in directive '#pragma omp for'}}
#pragma omp for num_teams(3)
  for (i = 0; i < 16; ++i)
    ;

// At one time, this error was reported twice.
// expected-error@+1 {{unexpected OpenMP clause 'uniform' in directive '#pragma omp for'}}
#pragma omp for uniform
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{unexpected OpenMP clause 'if' in directive '#pragma omp for'}}
#pragma omp for if(0)
  for (i = 0; i < 16; ++i)
    ;
}

// Trailing punctuation after the directive or a clause is ignored with a warning.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp parallel
#pragma omp for linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// Malformed and out-of-range 'collapse' arguments, plus nesting diagnostics.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
#pragma omp for collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for collapse(2)
  for (i = 0; i < 16; ++i) // expected-note {{defined as private}}
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

// Malformed 'private' clause argument lists.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// Malformed 'lastprivate' clause argument lists.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'firstprivate' clause argument lists, and combination with lastprivate.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop-iteration-variable type restrictions and the 'order' clause.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }

  // expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp for
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }

#pragma omp for order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for ordered order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} omp50-error {{'order' clause with 'concurrent' modifier cannot be specified if an 'ordered' clause is specified}} omp50-note {{'ordered' clause}}
  for (int i = 0; i < 10; ++i)
    ;
}
openmpLBM.c
// http://www.caam.rice.edu/~timwar/CAAM210/Flows.html #include <math.h> #include <stdlib.h> #include <stdio.h> #include "png_util.h" #include <omp.h> #define dfloat double #define FLUID 0 #define WALL 1 #define NSPECIES 9 // loop up 1D array index from 2D node coordinates int idx(int N, int n, int m){ return n + m*(N+2); } void lbmInput(const char *imageFileName, dfloat threshold, int *outN, int *outM, unsigned char **rgb, unsigned char **alpha, int **nodeType){ int n,m, N,M; // read png file read_png(imageFileName, &N, &M, rgb, alpha); // pad to guarantee space around obstacle and extend the wake int Npad = 3*N; int Mpad = 2*M; if(Npad>8192) Npad = 8192; if(Mpad>8192) Mpad = 8192; // threshold walls based on gray scale *nodeType = (int*) calloc((Npad+2)*(Mpad+2), sizeof(int)); // mark pixels by gray scale intensity unsigned char *rgbPad = (unsigned char*) calloc(3*(Npad+2)*(Mpad+2), sizeof(unsigned char)); unsigned char *alphaPad = (unsigned char*) calloc((Npad+2)*(Mpad+2), sizeof(unsigned char)); int wallCount = 0; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int offset = ((n-1)+(m-1)*N); dfloat r = (*rgb)[3*offset+0]; dfloat g = (*rgb)[3*offset+1]; dfloat b = (*rgb)[3*offset+2]; dfloat a = (*alpha) ? 
(*alpha)[offset]:255; // center image in padded region (including halo zone) int id = idx(Npad,n+(N/4),m+(M/2)); if(a==0) (*nodeType)[id] = FLUID; else (*nodeType)[id] = WALL*(sqrt(r*r+g*g+b*b)<threshold); wallCount += (*nodeType)[id]; rgbPad[3*id+0] = r; rgbPad[3*id+1] = g; rgbPad[3*id+2] = b; alphaPad[id] = 255; } } for(n=1;n<=Npad;++n){ (*nodeType)[idx(Npad,n,1)] = WALL; (*nodeType)[idx(Npad,n,Mpad)] = WALL; } free(*rgb); free(*alpha); *rgb = rgbPad; *alpha = alphaPad; printf("wallCount = %d (%g percent of %d x %d nodes)\n", wallCount, 100.*((dfloat)wallCount/((Npad+2)*(Mpad+2))), Npad, Mpad); *outN = Npad; *outM = Mpad; } void lbmOutput(const char *fname, const int *nodeType, unsigned char *rgb, unsigned char *alpha, const dfloat c, const dfloat dx, int N, int M, const dfloat *f){ int n,m,s; FILE *bah = fopen(fname, "w"); // compute vorticity dfloat *Ux = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat *Uy = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat fnm[NSPECIES]; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int base = idx(N, n, m); for(s=0;s<NSPECIES;++s) fnm[s] = f[base+s*(N+2)*(M+2)]; const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; // macroscopic momentum Ux[base] = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/rho; Uy[base] = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/rho; } } dfloat plotMin = -4, plotMax = 4; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int id = idx(N,n,m); // over write pixels in fluid region if(nodeType[id]==FLUID){ unsigned char r,g,b,a; // reconstruct macroscopic density dfloat rho = 0; for(s=0;s<NSPECIES;++s) rho += f[id+s*(N+2)*(M+2)]; rho = ((rho-plotMin)/(plotMax-plotMin)); // rescale dfloat dUxdy = (Ux[idx(N,n,m+1)]-Ux[idx(N,n,m-1)])/(2.*dx); dfloat dUydx = (Uy[idx(N,n+1,m)]-Uy[idx(N,n-1,m)])/(2.*dx); dfloat curlU = dUydx-dUxdy; curlU = ((curlU-plotMin)/(plotMax-plotMin)); r = 255*curlU; g = 255*curlU; b = 255*curlU; a = 255; rgb[idx(N,n,m)*3+0] = r; rgb[idx(N,n,m)*3+1] 
= g; rgb[idx(N,n,m)*3+2] = b; alpha[idx(N,n,m)] = a; } } } write_png(bah, N+2, M+2, rgb, alpha); fclose(bah); free(Ux); free(Uy); } // weights used to compute equilibrium distribution (post collision) const dfloat w0 = 4.f/9.f, w1 = 1.f/9.f, w2 = 1.f/9.f, w3 = 1.f/9.f; const dfloat w4 = 1.f/9.f, w5 = 1.f/36.f, w6 = 1.f/36.f, w7 = 1.f/36.f, w8 = 1.f/36.f; void lbmEquilibrium(const dfloat c, const dfloat rho, const dfloat Ux, const dfloat Uy, dfloat * feq){ // resolve macroscopic velocity into lattice particle velocity directions const dfloat U2 = Ux*Ux+Uy*Uy; const dfloat v0 = 0; const dfloat v1 = +Ux/c; const dfloat v2 = +Uy/c; const dfloat v3 = -Ux/c; const dfloat v4 = -Uy/c; const dfloat v5 = (+Ux+Uy)/c; const dfloat v6 = (-Ux+Uy)/c; const dfloat v7 = (-Ux-Uy)/c; const dfloat v8 = (+Ux-Uy)/c; // compute LBM post-collisional feq[0] = rho*w0*(1.f + 3.f*v0 + 4.5f*v0*v0 - 1.5f*U2/(c*c)); feq[1] = rho*w1*(1.f + 3.f*v1 + 4.5f*v1*v1 - 1.5f*U2/(c*c)); feq[2] = rho*w2*(1.f + 3.f*v2 + 4.5f*v2*v2 - 1.5f*U2/(c*c)); feq[3] = rho*w3*(1.f + 3.f*v3 + 4.5f*v3*v3 - 1.5f*U2/(c*c)); feq[4] = rho*w4*(1.f + 3.f*v4 + 4.5f*v4*v4 - 1.5f*U2/(c*c)); feq[5] = rho*w5*(1.f + 3.f*v5 + 4.5f*v5*v5 - 1.5f*U2/(c*c)); feq[6] = rho*w6*(1.f + 3.f*v6 + 4.5f*v6*v6 - 1.5f*U2/(c*c)); feq[7] = rho*w7*(1.f + 3.f*v7 + 4.5f*v7*v7 - 1.5f*U2/(c*c)); feq[8] = rho*w8*(1.f + 3.f*v8 + 4.5f*v8*v8 - 1.5f*U2/(c*c)); } // perform lattice streaming and collision steps void lbmUpdate(const int N, // number of nodes in x const int M, // number of nodes in y const dfloat c, // speed of sound const dfloat *tau, // relaxation rate const int * nodeType, // (N+2) x (M+2) node types const dfloat * f, // (N+2) x (M+2) x 9 fields before streaming and collisions dfloat * fnew){ // (N+2) x (M+2) x 9 fields after streaming and collisions // loop counters int n,m; // number of nodes in whole array including halo int Nall = (N+2)*(M+2); // loop over all non-halo nodes in lattice #pragma omp parallel for for(m=1;m<M+1;++m){ 
for(n=1;n<=N+1;++n){ // physics paramaters dfloat tauinv = 1.f/tau[idx(N,n,m)]; // discover type of node (WALL or FLUID) const int nt = nodeType[idx(N,n,m)]; dfloat fnm[NSPECIES]; // OUTFLOW if(n==N+1){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else if(nt == FLUID){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n+1,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n+1,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n+1,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else{ // WALL reflects particles fnm[0] = f[idx(N,n,m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n,m) + 3*Nall]; // E bound from W fnm[2] = f[idx(N,n,m) + 4*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 1*Nall]; // W bound from E fnm[4] = f[idx(N,n,m) + 2*Nall]; // S bound from N fnm[5] = f[idx(N,n,m) + 7*Nall]; // NE bound from SW fnm[6] = f[idx(N,n,m) + 8*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m) + 5*Nall]; // SW bound from NE fnm[8] = f[idx(N,n,m) + 6*Nall]; // SE bound from NW } // macroscopic density const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; if(rho<1e-4){ printf("rho(%d,%d)=%g\n", n,m,rho); exit(-1); } // macroscopic momentum const dfloat delta2 = 1e-5; const dfloat Ux = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + 
fnm[8])*c/sqrt(rho*rho+delta2); const dfloat Uy = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/sqrt(rho*rho+delta2); // compute equilibrium distribution dfloat feq[NSPECIES]; lbmEquilibrium(c, rho, Ux, Uy, feq); // MRT stabilization const dfloat g0 = 1.f, g1 = -2.f, g2 = -2.f, g3 = -2.f, g4 = -2.f; const dfloat g5 = 4.f, g6 = 4.f, g7 = 4.f, g8 = 4.f; const dfloat R = g0*fnm[0] + g1*fnm[1] + g2*fnm[2]+ g3*fnm[3] + g4*fnm[4] + g5*fnm[5] + g6*fnm[6] + g7*fnm[7] + g8*fnm[8]; // relax towards post collision densities fnm[0] -= tauinv*(fnm[0]-feq[0]) + (1.f-tauinv)*w0*g0*R*0.25f; fnm[1] -= tauinv*(fnm[1]-feq[1]) + (1.f-tauinv)*w1*g1*R*0.25f; fnm[2] -= tauinv*(fnm[2]-feq[2]) + (1.f-tauinv)*w2*g2*R*0.25f; fnm[3] -= tauinv*(fnm[3]-feq[3]) + (1.f-tauinv)*w3*g3*R*0.25f; fnm[4] -= tauinv*(fnm[4]-feq[4]) + (1.f-tauinv)*w4*g4*R*0.25f; fnm[5] -= tauinv*(fnm[5]-feq[5]) + (1.f-tauinv)*w5*g5*R*0.25f; fnm[6] -= tauinv*(fnm[6]-feq[6]) + (1.f-tauinv)*w6*g6*R*0.25f; fnm[7] -= tauinv*(fnm[7]-feq[7]) + (1.f-tauinv)*w7*g7*R*0.25f; fnm[8] -= tauinv*(fnm[8]-feq[8]) + (1.f-tauinv)*w8*g8*R*0.25f; // store new densities const int base = idx(N,n,m); fnew[base+0*Nall] = fnm[0]; fnew[base+1*Nall] = fnm[1]; fnew[base+2*Nall] = fnm[2]; fnew[base+3*Nall] = fnm[3]; fnew[base+4*Nall] = fnm[4]; fnew[base+5*Nall] = fnm[5]; fnew[base+6*Nall] = fnm[6]; fnew[base+7*Nall] = fnm[7]; fnew[base+8*Nall] = fnm[8]; } } } void lbmCheck(int N, int M, dfloat *f){ int n; int nanCount = 0; for(n=0;n<NSPECIES*N*M;++n){ nanCount += isnan(f[n]); } if(nanCount){ printf("found %d nans\n", nanCount); exit(-1); } } // set initial conditions (use uniform flow f everywhere) void lbmInitialConditions(dfloat c, int N, int M, int *nodeType, dfloat *f){ int n,m,s; dfloat feqIC[NSPECIES]; dfloat feqWALL[NSPECIES]; dfloat rhoIC = 1.; dfloat UxIC = 1.; dfloat UyIC = 0.; lbmEquilibrium(c, rhoIC, UxIC, UyIC, feqIC); lbmEquilibrium(c, rhoIC, 0., 0., feqWALL); for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ int base = idx(N, n, m); 
int s; if(n==0){ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqIC[s]; } } else{ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqWALL[s]; } } } } } void lbmRun(int N, int M, unsigned char *rgb, unsigned char *alpha, dfloat c, dfloat dx, dfloat *h_tau, int *nodeType, dfloat *f, dfloat *fnew){ int Nsteps = 300000/2, tstep = 0, iostep = 100; // time step for(tstep=0;tstep<Nsteps;++tstep){ // perform two updates lbmUpdate(N, M, c, h_tau, nodeType, f, fnew); lbmUpdate(N, M, c, h_tau, nodeType, fnew, f); // check for nans lbmCheck(N, M, f); if(!(tstep%iostep)){ printf("tstep = %d\n", tstep); char fname[BUFSIZ]; sprintf(fname, "bah%06d.png", tstep/iostep); lbmOutput(fname, nodeType, rgb, alpha, c, dx, N, M, f); } } } int main(int argc, char **argv){ if(argc!=3){ printf("usage: ./lbm foo.png threshold\n"); exit(-1); } int N, M; // size of lattice int n,m; // read threshold dfloat threshold = atof(argv[2]); char *imageFileName = strdup(argv[1]); unsigned char *rgb, *alpha; int *nodeType; lbmInput(imageFileName, threshold, &N, &M, &rgb, &alpha, &nodeType); printf("N=%d, M=%d\n", N, M); // physical parameters dfloat dx = .01; // lattice node spacings in x dfloat dt = dx*.1; // time step (also determines Mach number) dfloat c = dx/dt; // speed of sound dfloat tau = .61; // relaxation rate dfloat Reynolds = 2./((tau-.5)*c*c*dt/3.); printf("Reynolds number %g\n", Reynolds); // create lattice storage dfloat *f = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat)); dfloat *fnew = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat)); dfloat *h_tau = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); // set tau based on n index dfloat xo = .9; for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ dfloat x = ((dfloat)n)/N; dfloat taunm = tau*(1 + 4*(1+tanh(10*(x-xo)))); h_tau[idx(N,n,m)] = taunm; } } // set initial flow densities lbmInitialConditions(c, N, M, nodeType, f); lbmInitialConditions(c, N, M, nodeType, fnew); // time step the LBM solver lbmRun(N, M, rgb, alpha, c, 
dx, h_tau, nodeType, f, fnew); // output result as image lbmOutput("bahFinal.png", nodeType, rgb, alpha, c, dx, N, M, f); exit(0); return 0; }
GB_extract_vector_list.c
//------------------------------------------------------------------------------
// GB_extract_vector_list: extract vector indices for all entries in a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Constructs a list of vector indices for each entry in a matrix.  Creates
// the output J for GB_extractTuples, and I for GB_transpose when the qsort
// method is used.  That is, for every entry at position p inside vector j of
// A, this sets J [p] = j.

#include "GB_ek_slice.h"

void GB_extract_vector_list     // construct vector indices J, for each entry
(
    // output:
    int64_t *restrict J,        // size nnz(A) or more
    // input:
    const GrB_Matrix A,
    int nthreads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (J != NULL) ;
    ASSERT (A != NULL) ;
    ASSERT (nthreads >= 1) ;

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    // Ap: vector pointers; Ah: hyperlist (NULL when A is not hypersparse)
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;

    //--------------------------------------------------------------------------
    // determine the # of tasks to use
    //--------------------------------------------------------------------------

    // oversubscribe tasks 2x vs threads for load balance, but never use more
    // tasks than entries, and always at least one task
    int64_t anz = GB_NNZ (A) ;
    int ntasks = (nthreads == 1) ? 1 : (2 * nthreads) ;
    ntasks = GB_IMIN (ntasks, anz) ;
    ntasks = GB_IMAX (ntasks, 1) ;

    //--------------------------------------------------------------------------
    // slice the entries for each task
    //--------------------------------------------------------------------------

    // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and
    // vectors kfirst_slice [tid] to klast_slice [tid].  The first and last
    // vectors may be shared with prior slices and subsequent slices.

    int64_t pstart_slice [ntasks+1] ;
    int64_t kfirst_slice [ntasks] ;
    int64_t klast_slice  [ntasks] ;

    GB_ek_slice (pstart_slice, kfirst_slice, klast_slice, A, ntasks) ;

    //--------------------------------------------------------------------------
    // extract the vector index for each entry
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be operated on by this task
            //------------------------------------------------------------------

            // j is the actual vector index: k itself for a standard matrix,
            // or looked up in the hyperlist Ah for a hypersparse matrix
            int64_t j = (Ah == NULL) ? k : Ah [k] ;
            int64_t pA_start, pA_end ;
            // clip [Ap [k], Ap [k+1]) to this task's range of entries, since
            // the first/last vector of a task may be shared with other tasks
            GB_get_pA_and_pC (&pA_start, &pA_end, NULL,
                tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ;

            //------------------------------------------------------------------
            // extract vector indices of A(:,j)
            //------------------------------------------------------------------

            for (int64_t p = pA_start ; p < pA_end ; p++)
            {
                J [p] = j ;
            }
        }
    }
}
parallelDeterminant1.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>

// Recursive cofactor-expansion determinant of an NxN matrix read from
// "largerNxN50.txt", with the top-level expansion parallelized via OpenMP.

// Debug helper: printing is compiled out (printf calls left commented), so
// this is an intentional no-op that keeps the call sites in place.
void printNxNMatrix(double** matrix, int n)
{
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      // printf("%f, ",matrix[i][j]);
    }
    // printf("\n");
  }
  // printf("\n");
}

// Debug helper: same no-op as printNxNMatrix, for submatrices.
void printNxNSubMatrix(double** sub, int m)
{
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < m; j++) {
      // printf("%f, ",sub[i][j]);
    }
    // printf("\n");
  }
  // printf("\n");
}

// Fill `sub` ((n-1)x(n-1), preallocated) with the minor of `matrix` (nxn)
// obtained by deleting row 0 and column i.  Returns `sub` for convenience.
double** submatrix(double** matrix, double** sub, int n, int i)
{
  for (int y = 1; y < n; y++) {
    int sub_index = 0;
    for (int x = 0; x < n; x++) {
      if (x != i) {
        sub[y-1][sub_index] = matrix[y][x];
        sub_index += 1;
      }
    }
  }
  return sub;
}

// Determinant of a 2x2 matrix (base case of the recursion).
double det2x2(double** matrix, int n)
{
  if (n != 2) {
    // BUG FIX: previously exit(0) — reported success on a usage error
    fprintf(stderr, "det2x2: called with n=%d (expected 2)\n", n);
    exit(EXIT_FAILURE);
  }
  return matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1];
}

// Allocate m minors of size (m-1)x(m-1).  Helper shared by the sequential
// and parallel determinant routines.
static double*** alloc_minors(int m)
{
  double*** subs = (double***)malloc(m*sizeof(double**));
  for (int m1 = 0; m1 < m; m1++) {
    subs[m1] = (double**)malloc((m-1)*sizeof(double*));
    for (int m2 = 0; m2 < m-1; m2++)
      subs[m1][m2] = (double*)malloc((m-1)*sizeof(double));
  }
  return subs;
}

// Free the minors allocated by alloc_minors.
static void free_minors(double*** subs, int m)
{
  for (int m1 = 0; m1 < m; m1++) {
    for (int m2 = 0; m2 < m-1; m2++)
      free(subs[m1][m2]);
    free(subs[m1]);
  }
  free(subs);
}

// Sequential determinant by cofactor expansion along row 0.
double det2_seq(double** sub, int m)
{
  double det = 0;
  if (m == 1)       // BUG FIX: 1x1 input previously fell through and gave 0
    det = sub[0][0];
  else if (m == 2)
    det = det2x2(sub, m);
  else {
    double*** subs = alloc_minors(m);
    for (int i = 0; i < m; i++) {
      double x = sub[0][i];
      double sign = (double) pow(-1.0, 2.0 + i);  // (-1)^i cofactor sign
      submatrix(sub, subs[i], m, i);
      det += sign * x * det2_seq(subs[i], m-1);
    }
    // BUG FIX: the minors were never freed — the recursion leaked a
    // factorial amount of memory
    free_minors(subs, m);
  }
  return det;
}

// Parallel determinant: the top-level cofactor expansion runs under an
// OpenMP parallel-for reduction; recursive calls nest sequentially unless
// nested parallelism is enabled.
double det2_par(double** matrix, int n)
{
  double det = 0;
  if (n == 1)       // BUG FIX: 1x1 input previously fell through and gave 0
    det = matrix[0][0];
  else if (n == 2)
    det = det2x2(matrix, n);
  else {
    double*** submatrixs = alloc_minors(n);
#pragma omp parallel for reduction(+:det)
    for (int i = 0; i < n; i++) {
      double x = matrix[0][i];
      double sign = (double)pow(-1.0, 2.0 + i);
      submatrix(matrix, submatrixs[i], n, i);
      det += sign * x * det2_par(submatrixs[i], n-1);
    }
    // BUG FIX: free the per-column minors (previously leaked)
    free_minors(submatrixs, n);
  }
  return det;
}

int main()
{
  int i, j, n;
  FILE *fp;

  // read matrix dimension and entries from file
  fp = fopen("largerNxN50.txt", "r");
  if (!fp) {  // BUG FIX: fopen result was not checked (crashed on fscanf)
    fprintf(stderr, "cannot open largerNxN50.txt\n");
    return 1;
  }
  if (fscanf(fp, "%i", &n) != 1 || n < 1) {
    fprintf(stderr, "bad matrix dimension in input file\n");
    fclose(fp);
    return 1;
  }

  double** matrix = (double**)malloc(n*sizeof(double*));
  for (i = 0; i < n; i++) {
    matrix[i] = (double*)malloc(n*sizeof(double));
  }

  for (i = 0; i < n; ++i) {
    for (j = 0; j < n; ++j) {
      if (fscanf(fp, "%lf", &matrix[i][j]) != 1) {  // BUG FIX: unchecked fscanf
        fprintf(stderr, "bad matrix entry at (%d,%d)\n", i, j);
        fclose(fp);
        return 1;
      }
    }
  }
  fclose(fp);

  omp_set_num_threads(2);

  double t = omp_get_wtime();
  det2_par(matrix, n);
  printf("excution time is %0.5lf seconds\n", (omp_get_wtime()-t));
  // printf("Determinate is %f.", det2_par(matrix, n));

  // BUG FIX: release the matrix (previously leaked)
  for (i = 0; i < n; i++) free(matrix[i]);
  free(matrix);

  return 0;
}
lotus85_fmt_plug.c
/* * This software is Copyright (c) 2013 Sébastien Kaczmarek <skaczmarek@quarkslab.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Fixed the format to crack multiple hashes + added OMP support (Dhiru * Kholia). */ #if FMT_EXTERNS_H extern struct fmt_main fmt_lotus_85; #elif FMT_REGISTERS_H john_register_one(&fmt_lotus_85); #else #include <stdio.h> #include <string.h> #include "stdint.h" #include "sha.h" #include <openssl/rc2.h> #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 64 // XXX tune me! static int omp_t = 1; #endif #include "formats.h" #include "common.h" #include "memdbg.h" /* Plugin definition */ #define FORMAT_LABEL "lotus85" #define FORMAT_NAME "Lotus Notes/Domino 8.5" #define ALGORITHM_NAME "8/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH 0x64 #define BINARY_SIZE 0 #define BINARY_LENGTH 5 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 // #define MAX_KEYS_PER_CRYPT 0x900 // WTF? #define MAX_KEYS_PER_CRYPT 1 #define LOTUS85_MAX_BLOB_SIZE 0x64 #define LOTUS85_MIN_BLOB_SIZE 40 // XXX fictional value, but isn't this length fixed? 
/* Globals */ static const char LOTUS85_UNIQUE_STRING[] = "Lotus Notes Password Pad Uniquifier"; static const char LOTUS85_BASE16_CHARSET[] = "0123456789ABCDEFabcdef"; static uint8_t ebits_to_num[256]= { 0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a, 0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0, 0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b, 0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a, 0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda, 0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36, 0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8, 0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c, 0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17, 0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60, 0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72, 0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa, 0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd, 0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e, 0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b, 0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf, 0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77, 0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6, 0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3, 0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3, 0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e, 0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c, 0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d, 0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2, 0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46, 0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5, 0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97, 0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5, 0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef, 0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f, 0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf, 0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab, }; static struct custom_salt { uint8_t lotus85_user_blob[LOTUS85_MAX_BLOB_SIZE]; uint32_t lotus85_user_blob_len; } *cur_salt; /* * 5 bytes digest computed by the algorithm * As the password is used to derive a RC2 key and decipher the user blob * the 
reference digest is always different and we should track them all */ static uint8_t (*lotus85_last_binary_hash1)[BINARY_LENGTH]; static uint8_t (*lotus85_last_binary_hash2)[BINARY_LENGTH]; /* Plaintext passwords history requested by JtR engine */ static char (*lotus85_saved_passwords)[PLAINTEXT_LENGTH+1]; /* Decipher user.id user blob */ static void decipher_userid_blob(uint8_t *ciphered_blob, uint32_t len, uint8_t *userid_key, uint8_t *deciphered_blob) { RC2_KEY rc_key; uint8_t buf[LOTUS85_MAX_BLOB_SIZE+8],rc_iv[8]; memset(buf, 0x0, sizeof(buf)); memset(rc_iv, 0, sizeof(rc_iv)); RC2_set_key(&rc_key, 8, userid_key, 64); RC2_cbc_encrypt(ciphered_blob, buf, len, &rc_key, rc_iv, RC2_DECRYPT); memcpy(deciphered_blob, buf, len); } /* Custom hash transformation function */ static void custom_password_hash_trans(uint8_t *data, uint8_t *out, uint8_t *state) { uint8_t buffer[48]; size_t i, j; uint8_t c; memset(buffer, 0, sizeof(buffer)); memcpy(buffer, state, 16); memcpy(buffer + 16, data, 16); for(i=0;i<16;i+=4) { buffer[32+i] = data[i] ^ state[i]; buffer[32+i+1] = data[i+1] ^ state[i+1]; buffer[32+i+2] = data[i+2] ^ state[i+2]; buffer[32+i+3] = data[i+3] ^ state[i+3]; } for(j=c=0;j<18;j++) { for(i=0;i<sizeof(buffer);i+=6) { buffer[i] ^= ebits_to_num[(c-i+48) & 0xFF]; buffer[i+1] ^= ebits_to_num[(buffer[i]-i+47) & 0xFF]; buffer[i+2] ^= ebits_to_num[(buffer[i+1]-i+46) & 0xFF]; buffer[i+3] ^= ebits_to_num[(buffer[i+2]-i+45) & 0xFF]; buffer[i+4] ^= ebits_to_num[(buffer[i+3]-i+44) & 0xFF]; buffer[i+5] ^= ebits_to_num[(buffer[i+4]-i+43) & 0xFF]; c = buffer[i+5]; } } memcpy(state, buffer, 16); c = out[15]; for(i=0;i<16;i+=4) { out[i] ^= ebits_to_num[data[i] ^ c]; out[i+1] ^= ebits_to_num[data[i+1] ^ out[i]]; out[i+2] ^= ebits_to_num[data[i+2] ^ out[i+1]]; out[i+3] ^= ebits_to_num[data[i+3] ^ out[i+2]]; c = out[i+3]; } } /* Custom hash function */ static void custom_password_hash(const char *password, uint8_t *out) { uint8_t block1[16], state[16], block2[16]; size_t len, rlen, 
block_pos = 0; len = strlen(password); memset(state, 0, sizeof(state)); memset(block2, 0, sizeof(block2)); while((block_pos + 15) < len) { memcpy(block1, password+block_pos, sizeof(block1)); custom_password_hash_trans(block1, state, block2); block_pos += 16; } if(block_pos != len) { rlen = len - block_pos; memcpy(block1, password+block_pos, rlen); memset(block1+rlen, 16-rlen, 16-rlen); custom_password_hash_trans(block1, state, block2); } else { memset(block1, sizeof(block1), sizeof(block1)); custom_password_hash_trans(block1, state, block2); } custom_password_hash_trans(state, state, block2); memcpy(out, block2, sizeof(block2)); } /* Hash cste::password with sha1 */ static void password_hash(const char *password, uint8_t *hash) { SHA_CTX s_ctx; uint8_t digest[SHA_DIGEST_LENGTH]; SHA1_Init(&s_ctx); SHA1_Update(&s_ctx, LOTUS85_UNIQUE_STRING, strlen(LOTUS85_UNIQUE_STRING)); SHA1_Update(&s_ctx, password, strlen(password)); SHA1_Final(digest, &s_ctx); memcpy(hash, digest, sizeof(digest)); } /* Hash/checksum function used for key derivation from plaintext password */ static void compute_key_mac(uint8_t *key, size_t len, uint8_t *mac, size_t mac_len) { size_t i, j, mlen=mac_len-1; uint8_t k; for(i=0;i<16;i++) { k = ebits_to_num[mac[0] ^ mac[1]]; for(j=0;j<mlen;j++) { mac[j] = mac[j+1]; } mac[mlen] = key[i] ^ k; } } /* Hash/checksum function used for digest storage */ static void compute_msg_mac(uint8_t *msg, size_t len, uint8_t *msg_mac) { size_t i, j; uint8_t c; for(i=j=0;i<len;i++) { if(j!=4) { msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[j+1]]; j++; } else { msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[0]]; j = 0; } } c = msg_mac[0]; for(i=0;i<4;i++) { msg_mac[i] = msg_mac[i+1]; } msg_mac[i] = c; } /* * Derive password to retrieve the RC2 secret key * used when deciphering user blob stored in user.id file */ static void get_user_id_secret_key(const char *password, uint8_t *secret_key) { uint8_t key[16+20], mac[8]; memset(key, 0, sizeof(key)); 
memset(mac, 0, sizeof(mac)); custom_password_hash(password, key); password_hash(password, key+16); compute_key_mac(key, sizeof(key), mac, sizeof(mac)); memcpy(secret_key, mac, sizeof(mac)); } /* Plugin initialization */ static void lotus85_init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif lotus85_saved_passwords = mem_calloc_tiny( (PLAINTEXT_LENGTH + 1) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE); lotus85_last_binary_hash1 = mem_calloc_tiny( BINARY_LENGTH * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE); lotus85_last_binary_hash2 = mem_calloc_tiny( BINARY_LENGTH * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE); } /* Check if given ciphertext (hash) format is valid */ static int lotus85_valid(char *ciphertext,struct fmt_main *self) { int i,len; len = strlen(ciphertext); if(len % 2) return 0; if((len >> 1) > LOTUS85_MAX_BLOB_SIZE) return 0; if((len >> 1) < LOTUS85_MIN_BLOB_SIZE) return 0; for (i=0;i<len;i++) if(!strchr(LOTUS85_BASE16_CHARSET,ciphertext[i])) return 0; return 1; } static void *get_salt(char *ciphertext) { int i,len; static struct custom_salt cs; len = strlen(ciphertext) >> 1; for (i = 0; i < len; i++) cs.lotus85_user_blob[i] = (atoi16[ARCH_INDEX(ciphertext[i << 1])] << 4) + atoi16[ARCH_INDEX(ciphertext[(i << 1) + 1])]; cs.lotus85_user_blob_len = len; return (void*)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } /* Set password at given index */ static void lotus85_set_key(char *key,int index) { strnzcpy(lotus85_saved_passwords[index],key,strlen(key)+1); } /* Return password at given index as string */ static char *lotus85_get_key(int index) { return lotus85_saved_passwords[index]; } /* Main callback to compute lotus digest */ static int lotus85_crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; /* Compute digest for all given 
plaintext passwords */ #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char user_key[8], deciphered_userid[LOTUS85_MAX_BLOB_SIZE]; memset(lotus85_last_binary_hash1[index], 0, BINARY_LENGTH); memset(lotus85_last_binary_hash2[index], 0, BINARY_LENGTH); memset(user_key, 0, sizeof(user_key)); memset(deciphered_userid, 0, sizeof(deciphered_userid)); /* Derive password and retrieve RC2 key */ get_user_id_secret_key(lotus85_saved_passwords[index], user_key); /* Deciphered user blob stored in user.id file */ decipher_userid_blob(cur_salt->lotus85_user_blob, cur_salt->lotus85_user_blob_len, user_key, deciphered_userid); /* Store first deciphered digest */ memcpy(lotus85_last_binary_hash1[index], deciphered_userid + cur_salt->lotus85_user_blob_len - BINARY_LENGTH, BINARY_LENGTH); /* Compute digest of deciphered message */ compute_msg_mac(deciphered_userid, cur_salt->lotus85_user_blob_len - BINARY_LENGTH, lotus85_last_binary_hash2[index]); } return count; } /* Check if one of last computed hashs match */ static int lotus85_cmp_all(void *binary,int count) { int i; for(i = 0; i < count; i++) { if(!memcmp(lotus85_last_binary_hash1[i],lotus85_last_binary_hash2[i],BINARY_LENGTH)) return 1; } return 0; } /* Check if last computed hash match */ static int lotus85_cmp_one(void *binary,int index) { return !memcmp(lotus85_last_binary_hash1[index],lotus85_last_binary_hash2[index],BINARY_LENGTH); } /* No ASCII ciphertext, thus returns true */ static int lotus85_cmp_exact(char *source,int index) { return 1; } static struct fmt_tests lotus85_tests[] = { {"0040B2B17C344C236953F955B28E4865014034D1F664489D7F42B35FB6928A94DCFFEF7750CE029F94C83A582A80B4662D49B3FA45816143", "notesisterrible"}, {"CBCFC612FAE3154316223787C7CD29AD39BEDF4288FCDE310B32FD809C75F5FDC521667D5F6E7A047766F0E60952F7891593FFAF45AD0C15", "openwall"}, {NULL} }; /* JtR lotus 8.5 structure registration */ struct fmt_main fmt_lotus_85 = { { FORMAT_LABEL, FORMAT_NAME, 
ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif lotus85_tests }, { lotus85_init, fmt_default_done, fmt_default_reset, fmt_default_prepare, lotus85_valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, lotus85_set_key, /* Set plaintext password */ lotus85_get_key, /* Get plaintext password */ fmt_default_clear_keys, lotus85_crypt_all, /* Main hash funcion */ { fmt_default_get_hash }, lotus85_cmp_all, /* Compare * hash (binary) */ lotus85_cmp_one, /* Compare 1 hash (binary) */ lotus85_cmp_exact } }; #endif /* plugin stanza */
GB_binop__bxnor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_03__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int32) // C=scalar+B GB (_bind1st__bxnor_int32) // C=scalar+B' GB (_bind1st_tran__bxnor_int32) // C=A+scalar GB (_bind2nd__bxnor_int32) // C=A'+scalar GB (_bind2nd_tran__bxnor_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax 
[pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT32 || GxB_NO_BXNOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxnor_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxnor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* paint.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. 
% % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const ChannelType channel,const DrawInfo *draw_info, const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset, const MagickBooleanType invert) { #define MaxStacksize 524288UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; ExceptionInfo *exception; Image *floodplane_image; MagickBooleanType skip; MagickPixelPacket fill, pixel; MemoryInfo *segment_info; PixelPacket fill_color; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if ((image->matte == MagickFalse) && (draw_info->fill.opacity != OpaqueOpacity)) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Set floodfill state. 
*/ floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. */ x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); GetMagickPixelPacket(image,&fill); GetMagickPixelPacket(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p--; q--; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y, image->columns-x,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p++; q++; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; /* Tile fill color onto floodplane. 
*/ p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) { (void) GetFillColor(draw_info,x,y,&fill_color); SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&fill); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(fill.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(fill.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(fill.blue)); if (((channel & OpacityChannel) != 0) || (draw_info->fill.opacity != OpaqueOpacity)) SetPixelOpacity(q,ClampToQuantum(fill.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(fill.index)); } p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. 
% % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelPacket *start_color, % const PixelPacket *stop_color) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % This provides a good example of making use of the DrawGradientImage % function and the gradient structure in draw_info. % */ MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method, const PixelPacket *start_color,const PixelPacket *stop_color) { const char *artifact; DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; register ssize_t i; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(start_color != (const PixelPacket *) NULL); assert(stop_color != (const PixelPacket *) NULL); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; artifact=GetImageArtifact(image,"gradient:bounding-box"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box); gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; artifact=GetImageArtifact(image,"gradient:direction"); if (artifact != (const char *) NULL) { GravityType direction; direction=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,artifact); switch (direction) { case NorthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; 
gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case WestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case EastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case SouthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->columns-1; break; } case SouthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; break; } default: break; } } artifact=GetImageArtifact(image,"gradient:angle"); if (artifact != (const char *) NULL) gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"gradient:vector"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf", &gradient->gradient_vector.x1,&gradient->gradient_vector.y1, &gradient->gradient_vector.x2,&gradient->gradient_vector.y2); if 
((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) && (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) && (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) && (GetImageArtifact(image,"gradient:vector") == (const char *) NULL)) if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; artifact=GetImageArtifact(image,"gradient:center"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x, &gradient->center.y); artifact=GetImageArtifact(image,"gradient:angle"); if ((type == LinearGradient) && (artifact != (const char *) NULL)) { double sine, cosine, distance; /* Reference https://drafts.csswg.org/css-images-3/#linear-gradients. */ sine=sin((double) DegreesToRadians(gradient->angle-90.0)); cosine=cos((double) DegreesToRadians(gradient->angle-90.0)); distance=fabs((double) (image->columns-1)*cosine)+ fabs((double) (image->rows-1)*sine); gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine); gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine); gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine); gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine); } gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0; gradient->radii.y=gradient->radii.x; artifact=GetImageArtifact(image,"gradient:extent"); if (artifact != (const char *) NULL) { if (LocaleCompare(artifact,"Circle") == 0) { gradient->radii.x=(double) (MagickMax((image->columns-1), (image->rows-1)))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Diagonal") == 0) { gradient->radii.x=(double) (sqrt((double) (image->columns-1)* (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0; gradient->radii.y=gradient->radii.x; } if 
(LocaleCompare(artifact,"Ellipse") == 0) { gradient->radii.x=(double) (image->columns-1)/2.0; gradient->radii.y=(double) (image->rows-1)/2.0; } if (LocaleCompare(artifact,"Maximum") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1), (image->rows-1))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Minimum") == 0) { gradient->radii.x=(double) MagickMin((image->columns-1), (image->rows-1))/2.0; gradient->radii.y=gradient->radii.x; } } artifact=GetImageArtifact(image,"gradient:radii"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x, &gradient->radii.y); gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y); gradient->spread=method; /* Define the gradient to fill between the stops. */ gradient->number_stops=2; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gradient->stops,0,gradient->number_stops* sizeof(*gradient->stops)); for (i=0; i < (ssize_t) gradient->number_stops; i++) GetMagickPixelPacket(image,&gradient->stops[i].color); SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL, &gradient->stops[0].color); gradient->stops[0].offset=0.0; SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL, &gradient->stops[1].color); gradient->stops[1].offset=1.0; /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. 
Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **magick_restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict paint_indexes; register ssize_t x; register PixelPacket *magick_restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
/* Convenience wrapper: paint all composite channels. */
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Painting writes pixels directly, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* Normalize fill and target to this image's colorspace/depth. */
  ConformMagickPixelPacket(image,fill,&conform_fill,exception);
  ConformMagickPixelPacket(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* != invert: paint on match normally, on mismatch when inverted. */
      if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(conform_fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(conform_fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* An alpha channel is required before per-pixel opacity can be set. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* != invert: repaint opacity on match, or on mismatch when inverted. */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus we define this method to take two target pixels (one
%  low and one high) and all the pixels of an image which lie between
%  these two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): unlike TransparentPaintImage() this resets (rather than
     opaques) the alpha channel when absent — confirm this is intentional. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* A pixel matches when each RGB component lies inside [low,high]. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?
        MagickTrue : MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
test_verify_tables.c
/*
 * Verifies that every vectorized parasail table function produces the same
 * score and DP tables as its reference (serial) implementation, across all
 * substitution matrices, gap penalties, and sequence pairs in a FASTA file.
 */
#include "config.h"

#include <ctype.h>   /* FIX: isprint() was used without including <ctype.h> */
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "kseq.h"
KSEQ_INIT(int, read)

#include "parasail.h"
#include "parasail/cpuid.h"
#include "parasail/memory.h"
#include "parasail/matrix_lookup.h"

#include "func_verify_tables.h"

static int verbose = 0;

/* A gap-open/gap-extend penalty pair; {INT_MIN,INT_MIN} terminates a list. */
typedef struct gap_score {
    int open;
    int extend;
} gap_score_t;

gap_score_t gap_scores[] = {
    {10,1},
    {10,2},
    {14,2},
    {40,2},
    {INT_MIN,INT_MIN}
};

/*
 * Reads all sequences from a FASTA/FASTQ file into heap-allocated arrays.
 * On return *strings_ holds the sequences, *sizes_ their lengths, and
 * *count_ how many were read.  Exits the process on any I/O or allocation
 * failure (this is a test driver; no recovery is attempted).
 */
static inline void parse_sequences(
        const char *filename,
        char ***strings_,
        unsigned long **sizes_,
        unsigned long *count_)
{
    FILE* fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    unsigned long *sizes = NULL;
    unsigned long count = 0;
    unsigned long memory = 1000;

    fp = fopen(filename, "r");
    if (fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    sizes = malloc(sizeof(unsigned long) * memory);
    /* FIX: these allocations were previously used without a NULL check */
    if (NULL == strings || NULL == sizes) {
        perror("malloc");
        exit(1);
    }
    seq = kseq_init(fileno(fp));
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        if (count >= memory) {
            /* grow both arrays geometrically */
            char **new_strings = NULL;
            unsigned long *new_sizes = NULL;
            memory *= 2;
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(unsigned long) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);

    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}

/* n choose k without intermediate overflow for typical test sizes. */
static inline unsigned long binomial_coefficient(
        unsigned long n, unsigned long k)
{
    /* from http://blog.plover.com/math/choose.html */
    unsigned long r = 1;
    unsigned long d;
    if (k > n) {
        return 0;
    }
    for (d = 1; d <= k; d++) {
        r *= n--;
        r /= d;
    }
    return r;
}

/*
 * Maps a linear index `pos` to the pos-th 2-combination (a,b), a < b,
 * enumerated in colexicographic order.  Lets the pair loop be a single
 * flat (and therefore OpenMP-parallelizable) loop.
 */
static inline void k_combination2(
        unsigned long pos,
        unsigned long *a,
        unsigned long *b)
{
    double s;
    double i = floor(sqrt(2.0 * pos)) - 1.0;
    if (i <= 1.0) {
        i = 1.0;
    }
    s = i * (i - 1.0) / 2.0;
    while (pos - s >= i) {
        s += i;
        i += 1;
    }
    *a = (unsigned long)(pos - s);
    *b = (unsigned long)(i);
}

/* Returns 1 if the two s1Len*s2Len tables differ anywhere, else 0. */
static inline int diff_array(
        unsigned long s1Len,
        unsigned long s2Len,
        int *a,
        int *b)
{
    unsigned long i = 0;
    unsigned long size = s1Len * s2Len;
    for (i=0; i<size; ++i) {
        if (a[i] != b[i]) return 1;
    }
    return 0;
}

/*
 * Runs every function in group `f` (functions[0] is the reference) over all
 * sequence pairs, matrices, and gap penalties, reporting any score or table
 * mismatch.  A user-specified matrix/gap (via -m/-o/-e) restricts the sweep.
 */
static void check_functions(
        parasail_function_group_t f,
        char **sequences,
        unsigned long *sizes,
        unsigned long pair_limit,
        const parasail_matrix_t *matrix_,
        gap_score_t gap)
{
    const parasail_function_info_t *functions = f.fs;
    unsigned long matrix_index = 0;
    unsigned long gap_index = 0;
    unsigned long function_index = 0;
    unsigned long pair_index = 0;
    parasail_function_t *reference_function = NULL;
    const parasail_matrix_t ** matrices = parasail_matrices;
    const parasail_matrix_t * single_matrix[] = { matrix_, NULL };

    if (NULL != matrix_) {
        matrices = single_matrix;
    }

    printf("checking %s functions\n", f.name);
    for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) {
        const parasail_matrix_t *matrix = matrices[matrix_index];
        const char *matrixname = matrix->name;
        if (verbose) printf("\t%s\n", matrixname);
        for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) {
            int open = gap_scores[gap_index].open;
            int extend = gap_scores[gap_index].extend;
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                open = gap.open;
                extend = gap.extend;
            }
            if (verbose) printf("\t\topen=%d extend=%d\n", open, extend);
            reference_function = functions[0].pointer;
            for (function_index=1;
                    NULL!=functions[function_index].pointer;
                    ++function_index) {
                if (verbose) printf("\t\t\t%s\n", functions[function_index].name);
                unsigned long saturated = 0;
#pragma omp parallel for
                for (pair_index=0; pair_index<pair_limit; ++pair_index) {
                    parasail_result_t *reference_result = NULL;
                    parasail_result_t *result = NULL;
                    unsigned long a = 0;
                    unsigned long b = 1;
                    k_combination2(pair_index, &a, &b);
                    if (verbose) printf("\t\t\t\tpair=%lu (%lu,%lu)\n",
                            pair_index, a, b);
                    reference_result = reference_function(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend, matrix);
                    result = functions[function_index].pointer(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend, matrix);
                    if (result->saturated) {
                        /* no point in comparing a result that saturated */
                        parasail_result_free(reference_result);
                        parasail_result_free(result);
#pragma omp atomic
                        saturated += 1;
                        continue;
                    }
                    if (reference_result->score != result->score) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname,
                                    reference_result->score, result->score);
                        }
                    }
                    if (diff_array(
                                sizes[a], sizes[b],
                                reference_result->score_table,
                                result->score_table)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad score table\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    /* stats tables exist only for the *_stats_* groups */
                    if (reference_result->matches_table && diff_array(
                                sizes[a], sizes[b],
                                reference_result->matches_table,
                                result->matches_table)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad matches table\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->similar_table && diff_array(
                                sizes[a], sizes[b],
                                reference_result->similar_table,
                                result->similar_table)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad similar table\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->length_table && diff_array(
                                sizes[a], sizes[b],
                                reference_result->length_table,
                                result->length_table)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad length table\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    parasail_result_free(reference_result);
                    parasail_result_free(result);
                }
                if (verbose && saturated) {
                    printf("%s %d %d %s saturated %lu times\n",
                            functions[function_index].name,
                            open, extend, matrixname, saturated);
                }
            }
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                /* user-specified gap, don't loop */
                break;
            }
        }
    }
}

int main(int argc, char **argv)
{
    unsigned long i = 0;
    unsigned long seq_count = 0;
    unsigned long limit = 0;
    char **sequences = NULL;
    unsigned long *sizes = NULL;
    char *endptr = NULL;
    char *filename = NULL;
    int c = 0;
    int test_scores = 1;
    int test_stats = 0;
    char *matrixname = NULL;
    const parasail_matrix_t *matrix = NULL;
    gap_score_t gap = {INT_MIN,INT_MIN};

    while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) {
        switch (c) {
            case 'f':
                filename = optarg;
                break;
            case 'm':
                matrixname = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap.open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.open");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap.extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.extend");
                    exit(1);
                }
                break;
            case 'v':
                verbose = 1;
                break;
            case 's':
                test_stats = 1;
                break;
            case 'S':
                test_scores = 0;
                break;
            case '?':
                if (optopt == 'f' || optopt == 'n') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n", optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }

    if (filename) {
        parse_sequences(filename, &sequences, &sizes, &seq_count);
    }
    else {
        fprintf(stderr, "no filename specified\n");
        exit(1);
    }

    /* select the matrix */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }

    limit = binomial_coefficient(seq_count, 2);
    printf("%lu choose 2 is %lu\n", seq_count, limit);

#if HAVE_SSE2
    if (parasail_can_use_sse2()) {
        if (test_scores) {
            check_functions(parasail_nw_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_sse2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_sse2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_SSE41
    if (parasail_can_use_sse41()) {
        if (test_scores) {
            check_functions(parasail_nw_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_sse41, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_sse41, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_AVX2
    if (parasail_can_use_avx2()) {
        if (test_scores) {
            check_functions(parasail_nw_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_avx2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_avx2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_KNC
    {
        if (test_scores) {
            check_functions(parasail_nw_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_knc, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_knc, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

    /* dispatcher versions are always available */
    if (test_scores) {
        check_functions(parasail_nw_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_table_disp, sequences, sizes, limit, matrix, gap);
    }
    if (test_stats) {
        check_functions(parasail_nw_stats_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_stats_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_stats_table_disp, sequences, sizes, limit, matrix, gap);
    }

    for (i=0; i<seq_count; ++i) {
        free(sequences[i]);
    }
    free(sequences);
    free(sizes);

    return 0;
}
mediancut.c
/*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/

#include <stdlib.h>
#include <stddef.h>

#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"

/* Index of a named f_pixel channel within its float array layout. */
#define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float))

static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]);

/* A median-cut box: a contiguous range [ind, ind+colors) of histogram
   entries, with cached statistics used to decide which box to split next. */
struct box {
    f_pixel color;        // weighted average color of the box
    f_pixel variance;     // per-channel weighted variance
    double sum, total_error, max_error;  // total_error < 0 means "not yet computed"
    unsigned int ind;     // start index into the histogram
    unsigned int colors;  // number of histogram entries in the box
};

ALWAYS_INLINE static double variance_diff(double val, const double good_enough);
/* Squared difference, damped (x0.25) when already below the visually
   "good enough" threshold so tiny differences don't drive splitting. */
inline static double variance_diff(double val, const double good_enough)
{
    val *= val;
    if (val < good_enough*good_enough) return val*0.25;
    return val;
}

/** Weighted per-channel variance of the box. It's used to decide which channel to split by */
static f_pixel box_variance(const hist_item achv[], const struct box *box)
{
    f_pixel mean = box->color;
    double variancea=0, variancer=0, varianceg=0, varianceb=0;

    for(unsigned int i = 0; i < box->colors; ++i) {
        const f_pixel px = achv[box->ind + i].acolor;
        double weight = achv[box->ind + i].adjusted_weight;
        variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight;
        variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight;
        varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight;
        varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight;
    }

    // per-channel scaling weights the split decision toward channels the
    // eye is more sensitive to (green highest, alpha lowest)
    return (f_pixel){
        .a = variancea*(4.0/16.0),
        .r = variancer*(7.0/16.0),
        .g = varianceg*(9.0/16.0),
        .b = varianceb*(5.0/16.0),
    };
}

/* Largest color difference between the box's average color and any member. */
static double box_max_error(const hist_item achv[], const struct box *box)
{
    f_pixel mean = box->color;
    double max_error = 0;

    for(unsigned int i = 0; i < box->colors; ++i) {
        const double diff = colordifference(mean, achv[box->ind + i].acolor);
        if (diff > max_error) {
            max_error = diff;
        }
    }
    return max_error;
}

ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h);

static inline void hist_item_swap(hist_item *l, hist_item *r)
{
    if (l != r) {
        hist_item t = *l;
        *l = *r;
        *r = t;
    }
}

ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len);
/* Median-of-three pivot selection on tmp.sort_value (indices 8, len/2,
   len-1); falls back to the midpoint for short ranges. */
inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len)
{
    if (len < 32) {
        return len/2;
    }

    const unsigned int aidx=8, bidx=len/2, cidx=len-1;
    const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value;
    return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx ))
                   : ((b > c) ? bidx : ((a < c) ? aidx : cidx ));
}

ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len);
/* Hoare-style partition around base[0] (descending by tmp.sort_value).
   Returns the final index of the pivot; elements left of it are >= pivot. */
inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len)
{
    unsigned int l = 1, r = len;
    if (len >= 8) {
        hist_item_swap(&base[0], &base[qsort_pivot(base,len)]);
    }

    const unsigned int pivot_value = base[0].tmp.sort_value;
    while (l < r) {
        if (base[l].tmp.sort_value >= pivot_value) {
            l++;
        } else {
            while(l < --r && base[r].tmp.sort_value <= pivot_value) {}
            hist_item_swap(&base[l], &base[r]);
        }
    }
    l--;
    hist_item_swap(&base[0], &base[l]);

    return l;
}

/** quick select algorithm */
static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start)
{
    // narrows to whichever partition contains sort_start; only that side
    // needs to be ordered for the median lookup
    for(;;) {
        const unsigned int l = qsort_partition(base, len), r = l+1;

        if (l > 0 && sort_start < l) {
            len = l;
        }
        else if (r < len && sort_start > r) {
            base += r; len -= r; sort_start -= r;
        }
        else break;
    }
}

/** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set */
static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar)
{
    do {
        const unsigned int l = qsort_partition(base, len), r = l+1;

        // check if sum of left side is smaller than half,
        // if it is, then it doesn't need to be sorted
        unsigned int t = 0; double tmpsum = *lowervar;
        while (t <= l && tmpsum < halfvar) tmpsum += base[t++].color_weight;

        if (tmpsum < halfvar) {
            *lowervar = tmpsum;
        } else {
            if (l > 0) {
                hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar);
                if (res) return res;
            } else {
                // End of left recursion. This will be executed in order from the first element.
                *lowervar += base[0].color_weight;
                if (*lowervar > halfvar) return &base[0];
            }
        }

        if (len > r) {
            base += r; len -= r; // tail-recursive "call"
        } else {
            // NOTE(review): if qsort_partition returned len-1 then r == len
            // and base[r] reads one past the range — verify this case is
            // unreachable or intentionally relies on the caller's layout.
            *lowervar += base[r].color_weight;
            return (*lowervar > halfvar) ? &base[r] : NULL;
        }
    } while(1);
}

static f_pixel get_median(const struct box *b, hist_item achv[]);

/* Channel index paired with its variance, for sorting channels by variance. */
typedef struct {
    unsigned int chan; float variance;
} channelvariance;

/* qsort comparator: descending by variance. */
static int comparevariance(const void *ch1, const void *ch2)
{
    return ((const channelvariance*)ch1)->variance > ((const channelvariance*)ch2)->variance ? -1 :
          (((const channelvariance*)ch1)->variance < ((const channelvariance*)ch2)->variance ? 1 : 0);
}

/** Finds which channels need to be sorted first and preprocesses achv for fast sort */
static double prepare_sort(struct box *b, hist_item achv[])
{
    /*
    ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance
    */
    channelvariance channels[4] = {
        {index_of_channel(a), b->variance.a},
        {index_of_channel(r), b->variance.r},
        {index_of_channel(g), b->variance.g},
        {index_of_channel(b), b->variance.b},
    };

    qsort(channels, 4, sizeof(channels[0]), comparevariance);

    const unsigned int ind1 = b->ind;
    const unsigned int colors = b->colors;
    // GCC 9+ requires const locals to be listed explicitly under default(none)
#if __GNUC__ >= 9
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels, colors, ind1)
#else
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels)
#endif
    for(unsigned int i=0; i < colors; i++) {
        const float *chans = (const float *)&achv[ind1 + i].acolor;
        // Only the first channel really matters. When trying median cut many times
        // with different histogram weights, I don't want sort randomness to influence outcome.
        achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) |
                                         (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0);
    }

    const f_pixel median = get_median(b, achv);

    // box will be split to make color_weight of each side even
    const unsigned int ind = b->ind, end = ind+b->colors;
    double totalvar = 0;
    #pragma omp parallel for if (end - ind > 15000) \
        schedule(static) default(shared) reduction(+:totalvar)
    for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j]));

    return totalvar / 2.0;
}

/** finds median in unsorted set by sorting only minimum required */
static f_pixel get_median(const struct box *b, hist_item achv[])
{
    const unsigned int median_start = (b->colors-1)/2;

    // quickselect: only the elements around the median position get ordered
    hist_item_sort_range(&(achv[b->ind]), b->colors, median_start);

    if (b->colors&1) return achv[b->ind + median_start].acolor;

    // technically the second color is not guaranteed to be sorted correctly
    // but most of the time it is good enough to be useful
    return averagepixels(2, &achv[b->ind + median_start]);
}

/*
** Find the best splittable box. -1 if no boxes are splittable.
*/
static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse)
{
    int bi=-1; double maxsum=0;
    for(unsigned int i=0; i < boxes; i++) {
        // a box with fewer than two colors cannot be split further
        if (bv[i].colors < 2) {
            continue;
        }

        // looks only at max variance, because it's only going to split by it
        const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b));
        double thissum = bv[i].sum * MAX(bv[i].variance.a, cv);

        if (bv[i].max_error > max_mse) {
            // boost priority of boxes that exceed the current error budget
            thissum = thissum* bv[i].max_error/max_mse;
        }

        if (thissum > maxsum) {
            maxsum = thissum;
            bi = i;
        }
    }
    return bi;
}

// Distance of one histogram entry from the box median, damped by sqrt so that
// neither large distances nor large weights dominate the split decision.
inline static double color_weight(f_pixel median, hist_item h)
{
    float diff = colordifference(median, h.acolor);
    return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0);
}

static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv);
static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes);

// Sum of perceptually-weighted distances of every entry in the box from the
// box's average color.
static double box_error(const struct box *box, const hist_item achv[])
{
    f_pixel avg = box->color;

    double total_error=0;
    for (unsigned int i = 0; i < box->colors; ++i) {
        total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight;
    }
    return total_error;
}

// Returns true when the summed per-box errors stay under target_mse.
// Errors are (re)calculated lazily: the first pass sums only the cached
// values (total_error >= 0), the second pass fills in missing ones, so the
// expensive box_error() is skipped whenever the cheap sum already exceeds
// the target.
static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist)
{
    target_mse *= hist->total_perceptual_weight;
    double total_error=0;

    for(unsigned int i=0; i < boxes; i++) {
        // error is (re)calculated lazily
        if (bv[i].total_error >= 0) {
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }

    for(unsigned int i=0; i < boxes; i++) {
        if (bv[i].total_error < 0) {
            bv[i].total_error = box_error(&bv[i], hist->achv);
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }

    return true;
}

// Initializes a box over achv[ind .. ind+colors-1] with precomputed weight sum.
// variance and max_error are computed as OpenMP tasks for large boxes; callers
// wrap box_init in "#pragma omp taskgroup" so both fields are ready afterwards.
static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum)
{
    box->ind = ind;
    box->colors = colors;
    box->sum = sum;
    box->total_error = -1; // lazily computed in total_box_error_below_target()

    box->color = averagepixels(colors, &achv[ind]);

    #pragma omp task if (colors > 5000)
    box->variance = box_variance(achv, box);

    #pragma omp task if (colors > 8000)
    box->max_error = box_max_error(achv, box);
}

/*
** Here is the fun part, the median-cut colormap generator.  This is based
** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer
** Display," SIGGRAPH 1982 Proceedings, page 297.
*/
LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*))
{
    hist_item *achv = hist->achv;
    LIQ_ARRAY(struct box, bv, newcolors);
    unsigned int boxes = 1;

    /*
    ** Set up the initial box.
    */
    #pragma omp parallel
    #pragma omp single
    {
        double sum = 0;
        for(unsigned int i=0; i < hist->size; i++) {
            sum += achv[i].adjusted_weight;
        }

        #pragma omp taskgroup
        {
            box_init(&bv[0], achv, 0, hist->size, sum);
        }

        /*
        ** Main loop: split boxes until we have enough.
        */
        while (boxes < newcolors) {

            // first splits boxes that exceed quality limit (to have colors for things like odd green pixel),
            // later raises the limit to allow large smooth areas/gradients get colors.
            const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse;
            const int bi = best_splittable_box(bv, boxes, current_max_mse);
            if (bi < 0) {
                break;        /* ran out of colors! */
            }

            unsigned int indx = bv[bi].ind;
            unsigned int clrs = bv[bi].colors;

            /*
             Classic implementation tries to get even number of colors or pixels in each subdivision.

             Here, instead of popularity I use (sqrt(popularity)*variance) metric.
             Each subdivision balances number of pixels (popular colors) and low variance -
             boxes can be large if they have similar colors. Later boxes with high variance
             will be more likely to be split.

             Median used as expected value gives much better results than mean.
            */

            const double halfvar = prepare_sort(&bv[bi], achv);
            double lowervar=0;

            // hist_item_sort_halfvar sorts and sums lowervar at the same time
            // returns item to break at …minus one, which does smell like an off-by-one error.
            hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar);
            unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1);

            /*
            ** Split the box.
            */
            double sm = bv[bi].sum;
            double lowersum = 0;
            for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight;

            // re-init both halves; taskgroup waits for the variance/max_error tasks
            // spawned inside box_init
            #pragma omp taskgroup
            {
                box_init(&bv[bi], achv, indx, break_at, lowersum);
                box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum);
            }

            ++boxes;

            if (total_box_error_below_target(target_mse, bv, boxes, hist)) {
                break;
            }
        }
    }

    colormap *map = pam_colormap(boxes, malloc, free);
    set_colormap_from_boxes(map, bv, boxes, achv);

    adjust_histogram(achv, bv, boxes);

    return map;
}

static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv)
{
    /*
    ** Ok, we've got enough boxes.  Now choose a representative color for
    ** each box.  There are a number of possible ways to make this choice.
    ** One would be to choose the center of the box; this ignores any structure
    ** within the boxes.  Another method would be to average all the colors in
    ** the box - this is the method specified in Heckbert's paper.
    */

    for(unsigned int bi = 0; bi < boxes; ++bi) {
        map->palette[bi].acolor = bv[bi].color;

        /* store total color popularity (perceptual_weight is approximation of it) */
        map->palette[bi].popularity = 0;
        for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
            map->palette[bi].popularity += achv[i].perceptual_weight;
        }
    }
}

/* increase histogram popularity by difference from the final color (this is used as part of feedback loop) */
// NOTE(review): despite the banner comment above, the visible body only records
// each entry's box index and performs no weight adjustment — confirm against
// the upstream implementation whether adjustment code was removed deliberately.
static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes)
{
    for(unsigned int bi = 0; bi < boxes; ++bi) {
        for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
            achv[i].tmp.likely_colormap_index = bi;
        }
    }
}

// Weighted centroid of clrs histogram entries (weights: adjusted_weight).
// Returns zeros when the total weight is zero (division is skipped).
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[])
{
    double r = 0, g = 0, b = 0, a = 0, sum = 0;

    #pragma omp parallel for if (clrs > 25000) \
        schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum)
    for(unsigned int i = 0; i < clrs; i++) {
        const f_pixel px = achv[i].acolor;
        const double weight = achv[i].adjusted_weight;
        sum += weight;
        a += px.a * weight;
        r += px.r * weight;
        g += px.g * weight;
        b += px.b * weight;
    }

    if (sum) {
        a /= sum;
        r /= sum;
        g /= sum;
        b /= sum;
    }

    assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a));

    return (f_pixel){.r=r, .g=g, .b=b, .a=a};
}
bicubic_interpolation.c
// This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.

#ifndef BICUBIC_INTERPOLATION_C
#define BICUBIC_INTERPOLATION_C

#include <stdbool.h>

#define BOUNDARY_CONDITION 0
//0 Neumann
//1 Periodic
//2 Symmetric

/**
 * Neumann boundary condition: clamp the index to [0, nx-1].
 * Sets *out = true when x was outside the image domain.
 */
static int neumann_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        x = 0;
        *out = true;
    }
    else if (x >= nx) {
        x = nx - 1;
        *out = true;
    }
    return x;
}

/**
 * Periodic boundary condition: wrap the index modulo nx.
 * Sets *out = true when x was outside the image domain.
 */
static int periodic_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        const int n = 1 - (int)(x / (nx + 1)); // number of periods to add to make x non-negative
        const int ixx = x + n * nx;
        x = ixx % nx;
        *out = true;
    }
    else if (x >= nx) {
        x = x % nx;
        *out = true;
    }
    return x;
}

/**
 * Symmetric boundary condition: mirror the index at the image borders.
 * Sets *out = true when x was outside the image domain.
 */
static int symmetric_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        const int borde = nx - 1;
        const int xx = -x;
        const int n = (int)(xx / borde) % 2; // odd reflections flip direction

        if (n) x = borde - (xx % borde);
        else   x = xx % borde;
        *out = true;
    }
    else if (x >= nx) {
        const int borde = nx - 1;
        const int n = (int)(x / borde) % 2;

        if (n) x = borde - (x % borde);
        else   x = x % borde;
        *out = true;
    }
    return x;
}

/**
 * Cubic convolution interpolation in one dimension.
 *
 * v: the four interpolation points
 * x: fractional position between v[1] and v[2]
 */
static double cubic_interpolation_cell(
    double v[4],
    double x
)
{
    return v[1] + 0.5 * x * (v[2] - v[0]
                 + x * (2.0 * v[0] - 5.0 * v[1] + 4.0 * v[2] - v[3]
                 + x * (3.0 * (v[1] - v[2]) + v[3] - v[0])));
}

/**
 * Bicubic interpolation in two dimensions: interpolate each row of the
 * 4x4 neighborhood along y, then interpolate the four results along x.
 */
static double bicubic_interpolation_cell(
    double p[4][4], // array containing the interpolation points
    double x,       // x position to be interpolated
    double y        // y position to be interpolated
)
{
    double v[4];
    v[0] = cubic_interpolation_cell(p[0], y);
    v[1] = cubic_interpolation_cell(p[1], y);
    v[2] = cubic_interpolation_cell(p[2], y);
    v[3] = cubic_interpolation_cell(p[3], y);
    return cubic_interpolation_cell(v, x);
}

/**
 * Compute the bicubic interpolation of a point in an image.
 * Detect if the point goes outside the image domain.
 *
 * input:      image to be interpolated (row-major, nx * ny)
 * uu, vv:     x and y coordinates of the sample point
 * nx, ny:     image width and height
 * border_out: if true, return zero when the 4x4 stencil leaves the image
 */
float bicubic_interpolation_at(
    const float *input,
    const float uu,
    const float vv,
    const int nx,
    const int ny,
    bool border_out
)
{
    // stencil direction: one neighbor "behind" and two "ahead" along the
    // sign of each coordinate
    const int sx = (uu < 0) ? -1 : 1;
    const int sy = (vv < 0) ? -1 : 1;

    bool out[1] = {false};

    // Select the boundary-condition handler once instead of duplicating the
    // eight index computations per case. The original duplicated code used
    // sx instead of sy when computing 'my' (the y-direction backward
    // neighbor) in every case — that typo is fixed here.
    int (*bc)(int, int, bool *);
    switch (BOUNDARY_CONDITION) {
        case 1:  bc = periodic_bc;  break;
        case 2:  bc = symmetric_bc; break;
        default: bc = neumann_bc;   break; // case 0 and any other value
    }

    const int x   = bc((int) uu,          nx, out);
    const int y   = bc((int) vv,          ny, out);
    const int mx  = bc((int) uu - sx,     nx, out);
    const int my  = bc((int) vv - sy,     ny, out); // bug fix: was "(int) vv - sx"
    const int dx  = bc((int) uu + sx,     nx, out);
    const int dy  = bc((int) vv + sy,     ny, out);
    const int ddx = bc((int) uu + 2*sx,   nx, out);
    const int ddy = bc((int) vv + 2*sy,   ny, out);

    if (*out && border_out)
        return 0.0;
    else {
        // obtain the interpolation points of the image:
        // pNM = column N (mx,x,dx,ddx), row M (my,y,dy,ddy)
        const float p11 = input[mx  + nx * my];
        const float p12 = input[x   + nx * my];
        const float p13 = input[dx  + nx * my];
        const float p14 = input[ddx + nx * my];

        const float p21 = input[mx  + nx * y];
        const float p22 = input[x   + nx * y];
        const float p23 = input[dx  + nx * y];
        const float p24 = input[ddx + nx * y];

        const float p31 = input[mx  + nx * dy];
        const float p32 = input[x   + nx * dy];
        const float p33 = input[dx  + nx * dy];
        const float p34 = input[ddx + nx * dy];

        const float p41 = input[mx  + nx * ddy];
        const float p42 = input[x   + nx * ddy];
        const float p43 = input[dx  + nx * ddy];
        const float p44 = input[ddx + nx * ddy];

        // pol[column][row]: each pol[i] is one image column of the stencil
        double pol[4][4] = {
            {p11, p21, p31, p41},
            {p12, p22, p32, p42},
            {p13, p23, p33, p43},
            {p14, p24, p34, p44}
        };

        // return interpolation at the fractional offsets inside the cell
        return bicubic_interpolation_cell(pol, uu - x, vv - y);
    }
}

/**
 * Warp an image with a vector field using bicubic interpolation:
 * output(i,j) = input(j + u(i,j), i + v(i,j)).
 */
void bicubic_interpolation_warp(
    const float *input,  // image to be warped
    const float *u,      // x component of the vector field
    const float *v,      // y component of the vector field
    float *output,       // warped output image with bicubic interpolation
    const int nx,        // image width
    const int ny,        // image height
    bool border_out      // if true, put zeros outside the region
)
{
#pragma omp parallel for
    for (int i = 0; i < ny; i++)
        for (int j = 0; j < nx; j++) {
            const int p = i * nx + j;
            const float uu = (float)(j + u[p]);
            const float vv = (float)(i + v[p]);

            // obtain the bicubic interpolation at position (uu, vv)
            output[p] = bicubic_interpolation_at(input, uu, vv, nx, ny, border_out);
        }
}

#endif//BICUBIC_INTERPOLATION_C
exdot_omp.h
/*
 * %%%%%%%%%%%%%%%%%%%%%%%Original development%%%%%%%%%%%%%%%%%%%%%%%%%
 *  Copyright (c) 2016 Inria and University Pierre and Marie Curie
 * %%%%%%%%%%%%%%%%%%%%%%%Modifications and further additions%%%%%%%%%%
 *  Matthias Wiesenberger, 2017, within FELTOR and EXBLAS licenses
 */
/**
 *  @file exdot_omp.h
 *  @brief OpenMP version of exdot
 *
 *  @authors
 *        Developers : \n
 *        Roman Iakymchuk  -- roman.iakymchuk@lip6.fr \n
 *        Sylvain Collange -- sylvain.collange@inria.fr \n
 *        Matthias Wiesenberger -- mattwi@fysik.dtu.dk
 */
#pragma once
#include <cassert>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <iostream>

#include "accumulate.h"
#include "ExSUM.FPE.hpp"
#include <omp.h>

namespace dg
{
namespace exblas{
///@cond
namespace cpu{

//MW: does this implementation code a manual lock?
/**
 * \brief Parallel reduction step
 *
 * Merges acc2 into acc1. With VCL enabled it spins on *ready until the owner
 * of acc2 has reached the given tree level (manual synchronization via the
 * volatile flag); without VCL the caller synchronizes with an omp barrier.
 *
 * \param step step among threads
 * \param acc1 superaccumulator of the first thread
 * \param acc2 superaccumulator of the second thread
 * \param ready level counter of the second thread (volatile handshake flag)
 */
inline static void ReductionStep(int step, int64_t * acc1, int64_t * acc2,
    int volatile * ready)
{
#ifndef _WITHOUT_VCL
    _mm_prefetch((char const*)ready, _MM_HINT_T0);
    // Wait for thread 2 to be ready
    while(*ready < step) {
        // wait
        _mm_pause();
    }
#endif//_WITHOUT_VCL
    int imin = IMIN, imax = IMAX;
    Normalize( acc1, imin, imax);
    imin = IMIN, imax = IMAX;
    Normalize( acc2, imin, imax);
    // element-wise merge of the two superaccumulators
    for(int i = IMIN; i <= IMAX; ++i) {
        acc1[i] += acc2[i];
    }
}

/**
 * \brief Final step of summation -- Parallel reduction among threads
 *
 * Binary-tree reduction: at level s, every thread whose id is a multiple of
 * 2^s merges in the accumulator of its partner (id + 2^(s-1)); afterwards
 * thread 0 holds the total in acc[0..BIN_COUNT).
 *
 * \param tid thread ID
 * \param tnum number of threads
 * \param ready per-thread level counters, strided by linesize to avoid false sharing
 * \param acc superaccumulators, one BIN_COUNT block per thread
 * \param linesize stride (in int32_t) between per-thread ready flags
 */
inline static void Reduction(unsigned int tid, unsigned int tnum,
    std::vector<int32_t>& ready, std::vector<int64_t>& acc, int const linesize)
{
    // Custom tree reduction
    for(unsigned int s = 1; (unsigned)(1 << (s-1)) < tnum; ++s)
    {
        // 1<<(s-1) = 0001, 0010, 0100, ... = 1,2,4,8,16,...
        int32_t volatile * c = &ready[tid * linesize];
        ++*c; //set: ready for level s
#ifdef _WITHOUT_VCL
        #pragma omp barrier //all threads are ready for level s
#endif
        if(tid % (1 << s) == 0) { //1<<s = 2,4,8,16,32,...
            //only the tid thread executes this block, tid2 just sets ready
            unsigned int tid2 = tid | (1 << (s-1)); //effectively adds 1, 2, 4,...
            if(tid2 < tnum) {
                ReductionStep(s, &acc[tid*BIN_COUNT], &acc[tid2*BIN_COUNT],
                    &ready[tid2 * linesize]);
            }
        }
    }
}

// Exact dot product of a and b into the superaccumulator h_superacc; *err is
// set when any (vectorized) product is non-finite.
template<typename CACHE, typename PointerOrValue1, typename PointerOrValue2>
void ExDOTFPE(int N, PointerOrValue1 a, PointerOrValue2 b, int64_t* h_superacc, bool* err) {
    // OpenMP sum+reduction
    int const linesize = 16;    // * sizeof(int32_t)
    int maxthreads = omp_get_max_threads();
    std::vector<int64_t> acc(maxthreads*BIN_COUNT,0);
    std::vector<int32_t> ready(maxthreads * linesize);
    std::vector<bool> error( maxthreads, false);
    #pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        unsigned int tnum = omp_get_num_threads();
        CACHE cache(&acc[tid*BIN_COUNT]);
        *(int32_t volatile *)(&ready[tid * linesize]) = 0;  // Race here, who cares?
#ifndef _WITHOUT_VCL
        // per-thread range, rounded to SIMD width; last thread picks up the tail below
        int l = ((tid * int64_t(N)) / tnum) & ~7ul; // & ~7ul == round down to multiple of 8
        int r = ((((tid+1) * int64_t(N)) / tnum) & ~7ul) - 1;
        for(int i = l; i < r; i+=8) {
#ifndef _MSC_VER
            asm ("# myloop");
#endif
            //vcl::Vec8d r1 ;
            //vcl::Vec8d x  = TwoProductFMA(make_vcl_vec8d(a,i), make_vcl_vec8d(b,i), r1);
            vcl::Vec8d x  = make_vcl_vec8d(a,i)*make_vcl_vec8d(b,i);
            //MW: check sanity of input
            vcl::Vec8db finite = vcl::is_finite( x);
            if( !vcl::horizontal_and( finite) ) error[tid] = true;
            cache.Accumulate(x);
            //cache.Accumulate(r1); //MW: exact product but halfs the speed
        }
        if( tid+1==tnum && r != N-1) {
            r+=1;
            //accumulate remainder (partial vector load of the last N-r elements)
            //vcl::Vec8d r1;
            //vcl::Vec8d x  = TwoProductFMA(make_vcl_vec8d(a,r,N-r), make_vcl_vec8d(b,r,N-r), r1);
            vcl::Vec8d x  = make_vcl_vec8d(a,r,N-r)*make_vcl_vec8d(b,r,N-r);
            //MW: check sanity of input
            vcl::Vec8db finite = vcl::is_finite( x);
            if( !vcl::horizontal_and( finite) ) error[tid] = true;
            cache.Accumulate(x);
            //cache.Accumulate(r1);
        }
#else// _WITHOUT_VCL
        int l = ((tid * int64_t(N)) / tnum);
        int r = ((((tid+1) * int64_t(N)) / tnum) ) - 1;
        for(int i = l; i <= r; i++) {
            //double r1;
            //double x = TwoProductFMA(get_element(a,i),get_element(b,i),r1);
            double x = get_element(a,i)*get_element(b,i);
            cache.Accumulate(x);
            //cache.Accumulate(r1);
        }
#endif// _WITHOUT_VCL
        cache.Flush();
        int imin=IMIN, imax=IMAX;
        Normalize(&acc[tid*BIN_COUNT], imin, imax);
        Reduction(tid, tnum, ready, acc, linesize);
    }
    // thread 0's accumulator now holds the total
    for( int i=IMIN; i<=IMAX; i++) h_superacc[i] = acc[i];
    for ( int i=0; i<maxthreads; i++) if( error[i] == true) *err = true;
}

// Exact triple dot product (sum of a_i * b_i * c_i) into h_superacc.
// NOTE: only the final product a*b*c is checked for finiteness, and the
// intermediate product a*b is rounded (commented-out TwoProductFMA would
// make it exact at the cost of speed).
template<typename CACHE, typename PointerOrValue1, typename PointerOrValue2, typename PointerOrValue3>
void ExDOTFPE(int N, PointerOrValue1 a, PointerOrValue2 b, PointerOrValue3 c, int64_t* h_superacc, bool* err) {
    // OpenMP sum+reduction
    int const linesize = 16;    // * sizeof(int32_t) (MW avoid false sharing?)
    int maxthreads = omp_get_max_threads();
    std::vector<int64_t> acc(maxthreads*BIN_COUNT,0);
    std::vector<int32_t> ready(maxthreads * linesize);
    std::vector<bool> error( maxthreads, false);
    #pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        unsigned int tnum = omp_get_num_threads();
        CACHE cache(&acc[tid*BIN_COUNT]);
        *(int32_t volatile *)(&ready[tid * linesize]) = 0;  // Race here, who cares?
#ifndef _WITHOUT_VCL
        int l = ((tid * int64_t(N)) / tnum) & ~7ul;// & ~7ul == round down to multiple of 8
        int r = ((((tid+1) * int64_t(N)) / tnum) & ~7ul) - 1;
        for(int i = l; i < r; i+=8) {
#ifndef _MSC_VER
            asm ("# myloop");
#endif
            //vcl::Vec8d r1 , r2, cvec = vcl::Vec8d().load(c+i);
            //vcl::Vec8d x  = TwoProductFMA(vcl::Vec8d().load(a+i), vcl::Vec8d().load(b+i), r1);
            //vcl::Vec8d x2 = TwoProductFMA(x , cvec, r2);
            //vcl::Vec8d x1 = vcl::mul_add(vcl::Vec8d().load(a+i),vcl::Vec8d().load(b+i), 0);
            //vcl::Vec8d x2 = vcl::mul_add( x1 ,vcl::Vec8d().load(c+i), 0);
            vcl::Vec8d x1  = make_vcl_vec8d(a,i)*make_vcl_vec8d(b,i);
            vcl::Vec8d x2  = x1                 *make_vcl_vec8d(c,i);
            //MW: check sanity of input
            vcl::Vec8db finite = vcl::is_finite( x2);
            if( !vcl::horizontal_and( finite) ) error[tid] = true;
            cache.Accumulate(x2);
            //cache.Accumulate(r2);
            //x2 = TwoProductFMA(r1, cvec, r2);
            //cache.Accumulate(x2);
            //cache.Accumulate(r2);
        }
        if( tid+1 == tnum && r != N-1) {
            r+=1;
            //accumulate remainder (partial vector load of the last N-r elements)
            //vcl::Vec8d r1 , r2, cvec = vcl::Vec8d().load_partial(N-r, c+r);
            //vcl::Vec8d x  = TwoProductFMA(vcl::Vec8d().load_partial(N-r, a+r), vcl::Vec8d().load_partial(N-r,b+r), r1);
            //vcl::Vec8d x2 = TwoProductFMA(x , cvec, r2);
            //vcl::Vec8d x1 = vcl::mul_add(vcl::Vec8d().load_partial(N-r, a+r),vcl::Vec8d().load_partial(N-r,b+r), 0);
            //vcl::Vec8d x2 = vcl::mul_add( x1 ,vcl::Vec8d().load_partial(N-r,c+r), 0);
            vcl::Vec8d x1  = make_vcl_vec8d(a,r,N-r)*make_vcl_vec8d(b,r,N-r);
            vcl::Vec8d x2  = x1                     *make_vcl_vec8d(c,r,N-r);
            //MW: check sanity of input
            vcl::Vec8db finite = vcl::is_finite( x2);
            if( !vcl::horizontal_and( finite) ) error[tid] = true;
            cache.Accumulate(x2);
            //cache.Accumulate(r2);
            //x2 = TwoProductFMA(r1, cvec, r2);
            //cache.Accumulate(x2);
            //cache.Accumulate(r2);
        }
#else// _WITHOUT_VCL
        int l = ((tid * int64_t(N)) / tnum);
        int r = ((((tid+1) * int64_t(N)) / tnum) ) - 1;
        for(int i = l; i <= r; i++) {
            //double x1 = a[i]*b[i];
            //double x2 = x1*c[i];
            double x1 = get_element(a,i)*get_element(b,i);
            double x2 = x1*get_element(c,i);
            cache.Accumulate(x2);
        }
#endif// _WITHOUT_VCL
        cache.Flush();
        int imin=IMIN, imax=IMAX;
        Normalize(&acc[tid*BIN_COUNT], imin, imax);
        Reduction(tid, tnum, ready, acc, linesize);
    }
    // thread 0's accumulator now holds the total
    for( int i=IMIN; i<=IMAX; i++) h_superacc[i] = acc[i];
    for ( int i=0; i<maxthreads; i++) if( error[i] == true) *err = true;
}
}//namespace cpu
///@endcond

///@brief OpenMP parallel version of exact dot product
///@copydoc hide_exdot2
///@copydoc hide_hostacc
template<class PointerOrValue1, class PointerOrValue2, size_t NBFPE=8>
void exdot_omp(unsigned size, PointerOrValue1 x1_ptr, PointerOrValue2 x2_ptr, int64_t* h_superacc, int* status){
    static_assert( has_floating_value<PointerOrValue1>::value, "PointerOrValue1 needs to be T or T* with T one of (const) float or (const) double");
    static_assert( has_floating_value<PointerOrValue2>::value, "PointerOrValue2 needs to be T or T* with T one of (const) float or (const) double");
    bool error = false;
#ifndef _WITHOUT_VCL
    cpu::ExDOTFPE<cpu::FPExpansionVect<vcl::Vec8d, NBFPE, cpu::FPExpansionTraits<true> > >((int)size,x1_ptr,x2_ptr, h_superacc, &error);
#else
    cpu::ExDOTFPE<cpu::FPExpansionVect<double, NBFPE, cpu::FPExpansionTraits<true> > >((int)size,x1_ptr,x2_ptr, h_superacc, &error);
#endif//_WITHOUT_VCL
    // status: 0 = ok, 1 = non-finite value encountered during accumulation
    *status = 0;
    if( error ) *status = 1;
}

///@brief OpenMP parallel version of exact triple dot product
///@copydoc hide_exdot3
///@copydoc hide_hostacc
template<class PointerOrValue1, class PointerOrValue2, class PointerOrValue3, size_t NBFPE=8>
void exdot_omp(unsigned size, PointerOrValue1 x1_ptr, PointerOrValue2 x2_ptr, PointerOrValue3 x3_ptr, int64_t* h_superacc, int* status) {
    static_assert( has_floating_value<PointerOrValue1>::value, "PointerOrValue1 needs to be T or T* with T one of (const) float or (const) double");
    static_assert( has_floating_value<PointerOrValue2>::value, "PointerOrValue2 needs to be T or T* with T one of (const) float or (const) double");
    static_assert( has_floating_value<PointerOrValue3>::value, "PointerOrValue3 needs to be T or T* with T one of (const) float or (const) double");
    bool error = false;
#ifndef _WITHOUT_VCL
    cpu::ExDOTFPE<cpu::FPExpansionVect<vcl::Vec8d, NBFPE, cpu::FPExpansionTraits<true> > >((int)size,x1_ptr,x2_ptr, x3_ptr, h_superacc, &error);
#else
    cpu::ExDOTFPE<cpu::FPExpansionVect<double, NBFPE, cpu::FPExpansionTraits<true> > >((int)size,x1_ptr,x2_ptr, x3_ptr, h_superacc, &error);
#endif//_WITHOUT_VCL
    // status: 0 = ok, 1 = non-finite value encountered during accumulation
    *status = 0;
    if( error ) *status = 1;
}

}//namespace exblas
} //namespace dg
convolution_1x1_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack the fp32 1x1 convolution weights into fp16 blocks laid out for the
// pack4 SGEMM kernel: output channels in groups of 8 (then 4), input channels
// in groups of 4, converting each value to __fp16.
static void conv1x1s1_sgemm_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
    kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16);

    int q = 0;
    // groups of 8 output channels: 32 fp16 values per 4-input-channel step
    for (; q + 7 < outch; q += 8)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;
        const float* k4 = (const float*)kernel + (q + 4) * inch;
        const float* k5 = (const float*)kernel + (q + 5) * inch;
        const float* k6 = (const float*)kernel + (q + 6) * inch;
        const float* k7 = (const float*)kernel + (q + 7) * inch;

        __fp16* g0 = kernel_tm_pack4.channel(q / 8);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            // interleave: 8 output channels x 4 input channels
            g0[0] = (__fp16)k0[0];
            g0[1] = (__fp16)k1[0];
            g0[2] = (__fp16)k2[0];
            g0[3] = (__fp16)k3[0];
            g0[4] = (__fp16)k4[0];
            g0[5] = (__fp16)k5[0];
            g0[6] = (__fp16)k6[0];
            g0[7] = (__fp16)k7[0];

            g0[8] = (__fp16)k0[1];
            g0[9] = (__fp16)k1[1];
            g0[10] = (__fp16)k2[1];
            g0[11] = (__fp16)k3[1];
            g0[12] = (__fp16)k4[1];
            g0[13] = (__fp16)k5[1];
            g0[14] = (__fp16)k6[1];
            g0[15] = (__fp16)k7[1];

            g0[16] = (__fp16)k0[2];
            g0[17] = (__fp16)k1[2];
            g0[18] = (__fp16)k2[2];
            g0[19] = (__fp16)k3[2];
            g0[20] = (__fp16)k4[2];
            g0[21] = (__fp16)k5[2];
            g0[22] = (__fp16)k6[2];
            g0[23] = (__fp16)k7[2];

            g0[24] = (__fp16)k0[3];
            g0[25] = (__fp16)k1[3];
            g0[26] = (__fp16)k2[3];
            g0[27] = (__fp16)k3[3];
            g0[28] = (__fp16)k4[3];
            g0[29] = (__fp16)k5[3];
            g0[30] = (__fp16)k6[3];
            g0[31] = (__fp16)k7[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            g0 += 32;
        }
    }
    // remaining groups of 4 output channels: 16 fp16 values per step
    for (; q + 3 < outch; q += 4)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;

        __fp16* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            // interleave: 4 output channels x 4 input channels
            g0[0] = (__fp16)k0[0];
            g0[1] = (__fp16)k1[0];
            g0[2] = (__fp16)k2[0];
            g0[3] = (__fp16)k3[0];

            g0[4] = (__fp16)k0[1];
            g0[5] = (__fp16)k1[1];
            g0[6] = (__fp16)k2[1];
            g0[7] = (__fp16)k3[1];

            g0[8] = (__fp16)k0[2];
            g0[9] = (__fp16)k1[2];
            g0[10] = (__fp16)k2[2];
            g0[11] = (__fp16)k3[2];

            g0[12] = (__fp16)k0[3];
            g0[13] = (__fp16)k1[3];
            g0[14] = (__fp16)k2[3];
            g0[15] = (__fp16)k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}

// 1x1 stride-1 convolution as an SGEMM over pack4 fp16 data.
// (Definition continues beyond this view.)
static void conv1x1s1_sgemm_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h;

    const __fp16* bias = _bias;

    // interleave
    Mat tmp;
    if (size >= 8)
        tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else // if (size >= 1)
        tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start = 0;

        nn_size = (size -
remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { // transpose 4x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { // transpose 4x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = 
top_blob.channel(p + 1); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p * 4 : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "mov v28.16b, %10.16b \n" "mov v29.16b, %10.16b \n" "mov v30.16b, %10.16b \n" "mov v31.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v28.8h, v4.8h, v0.h[4] \n" "fmla v29.8h, v4.8h, v0.h[5] \n" "fmla v30.8h, v4.8h, v0.h[6] \n" "fmla v31.8h, v4.8h, v0.h[7] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v28.8h, v5.8h, v1.h[4] \n" "fmla v29.8h, v5.8h, v1.h[5] \n" "fmla v30.8h, v5.8h, v1.h[6] \n" "fmla v31.8h, v5.8h, v1.h[7] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "fmla v28.8h, v6.8h, v2.h[4] \n" "fmla v29.8h, v6.8h, v2.h[5] \n" "fmla v30.8h, v6.8h, v2.h[6] \n" "fmla v31.8h, v6.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "fmla v28.8h, v7.8h, v3.h[4] \n" "fmla v29.8h, v7.8h, v3.h[5] \n" "fmla v30.8h, v7.8h, v3.h[6] \n" "fmla v31.8h, v7.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext 
v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 
"=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(pp); float16x8_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3); kptr += 32; tmpptr += 4; } vst1_f16(outptr0, vget_low_f16(_sum0)); vst1_f16(outptr1, vget_high_f16(_sum0)); outptr0 += 4; outptr1 += 4; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 4 : zeros; float16x4_t _bias0 = vld1_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "mov v28.16b, %8.16b \n" "mov v29.16b, %8.16b \n" "mov v30.16b, %8.16b \n" "mov v31.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v28.4h, v4.4h, v0.h[4] \n" "fmla v29.4h, v4.4h, v0.h[5] \n" "fmla v30.4h, v4.4h, v0.h[6] \n" "fmla v31.4h, v4.4h, v0.h[7] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v28.4h, v5.4h, v1.h[4] \n" "fmla v29.4h, v5.4h, v1.h[5] \n" "fmla v30.4h, v5.4h, v1.h[6] \n" "fmla v31.4h, v5.4h, v1.h[7] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "fmla v28.4h, v6.4h, v2.h[4] \n" "fmla v29.4h, v6.4h, v2.h[5] \n" "fmla v30.4h, v6.4h, v2.h[6] \n" "fmla v31.4h, v6.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "fmla v28.4h, v7.4h, v3.h[4] \n" "fmla v29.4h, v7.4h, v3.h[5] \n" "fmla v30.4h, v7.4h, v3.h[6] \n" "fmla v31.4h, v7.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", 
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); float16x4_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(tmpptr); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); _sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3); 
kptr += 16;
tmpptr += 4;
}

// write the 4 accumulated output pixels of this output-channel group
vst1_f16(outptr0, _sum0);

outptr0 += 4;
}
}

//
// NOTE sgemm
// for (; p<outch; p++)
// {
//     Mat out0 = top_blob.channel(p);
//
//     const short bias0 = bias ? bias[p] : 0.f;
//
//     __fp16* outptr0 = out0;
//
//     for (int i=0; i<size; i++)
//     {
//         short sum = bias0;
//
//         const __fp16* kptr = _kernel.channel(p);
//
//         for (int q=0; q<inch; q++)
//         {
//             const __fp16* img0 = bottom_blob.channel(q);
//
//             sum += img0[i] * kptr[0];
//             kptr ++;
//         }
//
//         outptr0[i] = sum;
//     }
// }
}

// Stride-2 1x1 convolution, fp16 storage + fp16 arithmetic, pack4 layout.
// Strategy: a 1x1 kernel with stride 2 only ever reads every other input
// pixel (in both directions), so the input is first "shrunk" by copying the
// sampled pixels into a dense blob, and the stride-1 sgemm kernel is then
// reused unchanged on that shrunken blob.
static void conv1x1s2_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Elements (in __fp16, pack4 => x4) to skip after finishing a row:
    // (w - 2*outw) finishes the current input row, + w skips the odd row
    // dropped by the vertical stride of 2.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // copy 4 output pixels (= every other pixel from 8 input pixels) per iteration
            for (; j + 3 < outw; j += 4)
            {
                float16x4_t _v0 = vld1_f16(r0);
                float16x4_t _v1 = vld1_f16(r0 + 8);
                float16x4_t _v2 = vld1_f16(r0 + 16);
                float16x4_t _v3 = vld1_f16(r0 + 24);
                float16x8_t _v01 = vcombine_f16(_v0, _v1);
                float16x8_t _v23 = vcombine_f16(_v2, _v3);
                vst1q_f16(outptr, _v01);
                vst1q_f16(outptr + 8, _v23);

                r0 += 32;
                outptr += 16;
            }
            for (; j + 1 < outw; j += 2)
            {
                float16x4_t _v0 = vld1_f16(r0);
                float16x4_t _v1 = vld1_f16(r0 + 8);
                float16x8_t _v = vcombine_f16(_v0, _v1);
                vst1q_f16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }
            for (; j < outw; j++)
            {
                float16x4_t _v = vld1_f16(r0);
                vst1_f16(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
// pfem_2_monolithic_slip_strategy.h
#ifndef KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #define KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" //#include "custom_elements/fractional_step.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/strategies/residualbased_linear_strategy.h" #include "custom_utilities/solver_settings.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace, class TLinearSolver > class PFEM2MonolithicSlipStrategy : public SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> { public: ///@name Type Definitions ///@{ /// Counted pointer of FSStrategy typedef boost::shared_ptr< FSStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, 
TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef SolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector): BaseType(rModelPart,false), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { InitializeStrategy(rSolverConfig,PredictorCorrector); } PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, const Kratos::Variable<int>& PeriodicVar): BaseType(rModelPart,false), mrPeriodicIdVar(PeriodicVar) { InitializeStrategy(rSolverConfig,PredictorCorrector); } SolvingStrategyPython(self.model_part, self.time_scheme, self.monolithic_linear_solver, self.conv_criteria, CalculateReactionFlag, ReformDofSetAtEachStep, MoveMeshFlag) self.monolithic_solver.SetMaximumIterations(self.maximum_nonlin_iterations) PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pLinearSolver, bool ReformDofSet = true, double Tol = 0.01, int MaxIterations = 3, unsigned int DomainSize = 2): BaseType(rModelPart,MoveMeshFlag), // Move Mesh flag, pass as input? mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxVelocityIter(MaxVelocityIterations), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mPredictorCorrector(PredictorCorrector), mUseSlipConditions(true), ///@todo initialize somehow mReformDofSet(ReformDofSet), mExtraIterationSteps(), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. 
this->Check(); bool CalculateReactions = false; bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent; typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme< TSparseSpace, TDenseSpace > SchemeType; typename SchemeType::Pointer pScheme; if (mUseSlipConditions) { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticSchemeSlip< TSparseSpace, TDenseSpace > (mDomainSize,mDomainSize)); pScheme.swap(Temp); } else { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); pScheme.swap(Temp); } //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); // BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverSlip<TSparseSpace, TDenseSpace, TLinearSolver, VarComponent > (pNewVelocityLinearSolver, this->mDomainSize, VELOCITY_X, VELOCITY_Y, VELOCITY_Z)); this->mpMomentumStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pVelocityLinearSolver, vel_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer( //new 
ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>(pPressureLinearSolver)); new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); this->mpPressureStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, pressure_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); if (mUseSlipConditions) { #pragma omp parallel { ModelPart::ConditionIterator CondBegin; ModelPart::ConditionIterator CondEnd; OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd); for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond) { const double FlagValue = itCond->GetValue(IS_STRUCTURE); if (FlagValue != 0.0) { Condition::GeometryType& rGeom = itCond->GetGeometry(); for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i) { rGeom[i].SetLock(); rGeom[i].SetValue(IS_STRUCTURE,FlagValue); rGeom[i].UnSetLock(); } } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_STRUCTURE); } KRATOS_CATCH(""); } /// Destructor. virtual ~FSStrategy(){} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ virtual int Check() { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if(DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"DELTA_TIME Key is 0. Check that the application was correctly registered.",""); if(BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"BDF_COEFFICIENTS Key is 0. 
Check that the application was correctly registered.",""); ModelPart& rModelPart = BaseType::GetModelPart(); if ( mTimeOrder == 2 && rModelPart.GetBufferSize() < 3 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (BDF2), needed 3, got ",rModelPart.GetBufferSize()); if ( mTimeOrder == 1 && rModelPart.GetBufferSize() < 2 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ",rModelPart.GetBufferSize()); const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); for ( ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl ) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } virtual double Solve() { // Initialize BDF2 coefficients ModelPart& rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; if (mPredictorCorrector) { bool Converged = false; // Iterative solution for pressure for(unsigned int it = 0; it < mMaxPressureIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Pressure iteration " << it << std::endl; NormDp = this->SolveStep(); Converged = this->CheckPressureConvergence(NormDp); if ( Converged ) { if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-corrector converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-correctior iterations did not converge." 
<< std::endl; } else { // Solve for fractional step velocity, then update pressure once NormDp = this->SolveStep(); } if (mReformDofSet) this->Clear(); return NormDp; } virtual void CalculateReactions() { ModelPart& rModelPart = BaseType::GetModelPart(); ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // Set fractional step index to the momentum equation step int OriginalStep = rCurrentProcessInfo[FRACTIONAL_STEP]; rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,1); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); const array_1d<double,3> Zero(3,0.0); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { itNode->FastGetSolutionStepValue(REACTION) = Zero; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { //itElem->InitializeNonLinearIteration(rCurrentProcessInfo); // Build local system itElem->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo); Element::GeometryType& rGeom = itElem->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { rGeom[i].SetLock(); array_1d<double,3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); for (unsigned int d = 0; d < mDomainSize; ++d) rReaction[d] -= RHS_Contribution[index++]; rGeom[i].UnSetLock(); } } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Reset original fractional step index rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,OriginalStep); } virtual void AddIterationStep(Process::Pointer pNewStep) { mExtraIterationSteps.push_back(pNewStep); } virtual void 
ClearExtraIterationSteps() { mExtraIterationSteps.clear(); } virtual void Clear() { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ virtual void SetEchoLevel(int Level) { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "FSStrategy" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {rOStream << "FSStrategy";} /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables. 
*/ void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo) { KRATOS_TRY; if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = rCurrentProcessInfo[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } double SolveStep() { ModelPart& rModelPart = BaseType::GetModelPart(); // 1. 
Fractional step momentum iteration rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); bool Converged = false; int Rank = rModelPart.GetCommunicator().MyPID(); for(unsigned int it = 0; it < mMaxVelocityIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && Rank == 0) std::cout << "Momentum iteration " << it << std::endl; // build momentum system and solve for fractional step velocity increment rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); double NormDv = mpMomentumStrategy->Solve(); // // Compute projections (for stabilization) // rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); // this->ComputeSplitOssProjections(rModelPart); // // Additional steps // Moved to end of step // for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); // iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) // (*iExtraSteps)->Execute(); // Check convergence Converged = this->CheckFractionalStepConvergence(NormDv); if (Converged) { if ( BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity iterations did not converge." << std::endl; // Compute projections (for stabilization) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); this->ComputeSplitOssProjections(rModelPart); // 2. 
Pressure solution (store pressure variation in PRESSURE_OLD_IT) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const double OldPress = itNode->FastGetSolutionStepValue(PRESSURE); itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -OldPress; } } if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Calculating Pressure." << std::endl; double NormDp = mpPressureStrategy->Solve(); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) += itNode->FastGetSolutionStepValue(PRESSURE); } // 3. Compute end-of-step velocity if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Updating Velocity." 
<< std::endl;

// End-of-step velocity update (FRACTIONAL_STEP == 6)
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,6);

this->CalculateEndOfStepVelocity();

// Additional steps
for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin();
     iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps)
    (*iExtraSteps)->Execute();

return NormDp;
}

/// Relative L2 convergence check for the fractional velocity update.
/** Compares the increment norm NormDv against the norm of the current
 *  (MPI-summed) velocity field; converged when the ratio drops below
 *  mVelocityTolerance.
 */
bool CheckFractionalStepConvergence(const double NormDv)
{
    ModelPart& rModelPart = BaseType::GetModelPart();

    double NormV = 0.00;

    #pragma omp parallel reduction(+:NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);

        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double,3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

            for (unsigned int d = 0; d < 3; ++d)
                NormV += Vel[d] * Vel[d];
        }
    }

    BaseType::GetModelPart().GetCommunicator().SumAll(NormV);

    NormV = sqrt(NormV);

    if (NormV == 0.0) NormV = 1.00; // avoid division by zero on a zero field

    double Ratio = NormDv / NormV;

    if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        std::cout << "Fractional velocity relative error: " << Ratio << std::endl;

    if (Ratio < mVelocityTolerance)
    {
        return true;
    }
    else return false;
}

/// Same relative check for the pressure solve: NormDp against the norm of PRESSURE.
bool CheckPressureConvergence(const double NormDp)
{
    ModelPart& rModelPart = BaseType::GetModelPart();

    double NormP = 0.00;

    #pragma omp parallel reduction(+:NormP)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);

        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
            NormP += Pr * Pr;
        }
    }

    BaseType::GetModelPart().GetCommunicator().SumAll(NormP);

    NormP = sqrt(NormP);

    if (NormP == 0.0) NormP = 1.00;

    double Ratio = NormDp / NormP;

    if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        std::cout << "Pressure relative error: " << Ratio << std::endl;

    if (Ratio <
mPressureTolerance) { return true; } else return false; } void ComputeSplitOssProjections(ModelPart& rModelPart) { const array_1d<double,3> Zero(3,0.0); array_1d<double,3> Out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { itNode->FastGetSolutionStepValue(CONV_PROJ) = Zero; itNode->FastGetSolutionStepValue(PRESS_PROJ) = Zero; itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0; itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem ) { itElem->Calculate(CONV_PROJ,Out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); // If there are periodic conditions, add contributions from both sides to the periodic nodes this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); itNode->FastGetSolutionStepValue(CONV_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(PRESS_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(DIVPROJ) /= NodalArea; } } } void CalculateEndOfStepVelocity() { ModelPart& rModelPart = BaseType::GetModelPart(); const array_1d<double,3> Zero(3,0.0); 
array_1d<double,3> Out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { itNode->FastGetSolutionStepValue(FRACT_VEL) = Zero; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem ) { itElem->Calculate(VELOCITY,Out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); this->PeriodicConditionVelocityCorrection(rModelPart); // Force the end of step velocity to verify slip conditions in the model if (mUseSlipConditions) this->EnforceSlipCondition(IS_STRUCTURE); if (mDomainSize > 2) { #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); if ( ! itNode->IsFixed(VELOCITY_X) ) itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! itNode->IsFixed(VELOCITY_Y) ) itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; if ( ! 
itNode->IsFixed(VELOCITY_Z) ) itNode->FastGetSolutionStepValue(VELOCITY_Z) += itNode->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea; } } } else { #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); if ( ! itNode->IsFixed(VELOCITY_X) ) itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! itNode->IsFixed(VELOCITY_Y) ) itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; } } } } /** * @brief Substract wall-normal component of velocity update to ensure that the final velocity satisfies slip conditions. * @param rSlipWallFlag If Node.GetValue(rSlipWallFlag) != 0, the node is in the wall. */ void EnforceSlipCondition(Variable<double>& rSlipWallFlag) { ModelPart& rModelPart = BaseType::GetModelPart(); #pragma omp parallel { ModelPart::NodeIterator NodeBegin; // = rModelPart.NodesBegin(); ModelPart::NodeIterator NodeEnd; // = rModelPart.NodesEnd(); OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for ( ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode ) { if ( itNode->GetValue(rSlipWallFlag) != 0.0 ) { const array_1d<double,3>& rNormal = itNode->FastGetSolutionStepValue(NORMAL); array_1d<double,3>& rDeltaVelocity = itNode->FastGetSolutionStepValue(FRACT_VEL); double Proj = rNormal[0] * rDeltaVelocity[0]; double Norm = rNormal[0] * rNormal[0]; for (unsigned int d = 1; d < mDomainSize; ++d) { Proj += rNormal[d] * rDeltaVelocity[d]; Norm += rNormal[d] * rNormal[d]; } Proj /= Norm; rDeltaVelocity -= Proj * rNormal; } } } } /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * 
both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
     * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
     * 2- The non-historical containers are added across processes, transmiting the right value from the condition owner to all partitions.\n
     * 3- The value on all periodic nodes is replaced by the one received in step 2.
     */
    void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
    {
        // Only act if a periodic-pair variable was configured (Key() == 0 means "unset").
        if (mrPeriodicIdVar.Key() != 0)
        {
            // Total node count across all partitions; used below to recognize the
            // special 4-node "corner" conditions, whose pair index is offset past it.
            int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size();
            rModelPart.GetCommunicator().SumAll(GlobalNodesNum);

            // Step 1: the owner of each PeriodicCondition accumulates the projection
            // quantities of its nodes into the non-historical containers (GetValue).
            for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
            {
                ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
                if (rGeom.PointsNumber() == 2)
                {
                    Node<3>& rNode0 = rGeom[0];
                    int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                    Node<3>& rNode1 = rGeom[1];
                    int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);

                    // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                    if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                    {
                        // Sum contributions from both sides of the periodic boundary.
                        double NodalArea = rNode0.FastGetSolutionStepValue(NODAL_AREA) + rNode1.FastGetSolutionStepValue(NODAL_AREA);
                        array_1d<double,3> ConvProj = rNode0.FastGetSolutionStepValue(CONV_PROJ) + rNode1.FastGetSolutionStepValue(CONV_PROJ);
                        array_1d<double,3> PressProj = rNode0.FastGetSolutionStepValue(PRESS_PROJ) + rNode1.FastGetSolutionStepValue(PRESS_PROJ);
                        double DivProj = rNode0.FastGetSolutionStepValue(DIVPROJ) + rNode1.FastGetSolutionStepValue(DIVPROJ);

                        // Stage the combined values in the non-historical container of
                        // both nodes; they are assembled across partitions below.
                        rNode0.GetValue(NODAL_AREA) = NodalArea;
                        rNode0.GetValue(CONV_PROJ) = ConvProj;
                        rNode0.GetValue(PRESS_PROJ) = PressProj;
                        rNode0.GetValue(DIVPROJ) = DivProj;
                        rNode1.GetValue(NODAL_AREA) = NodalArea;
                        rNode1.GetValue(CONV_PROJ) = ConvProj;
                        rNode1.GetValue(PRESS_PROJ) = PressProj;
                        rNode1.GetValue(DIVPROJ) = DivProj;
                    }
                }
                else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
                {
                    // 4-node condition linking periodic "corner" nodes: combine the
                    // contributions of all four nodes and give each the total.
                    double NodalArea = rGeom[0].FastGetSolutionStepValue(NODAL_AREA);
                    array_1d<double,3> ConvProj = rGeom[0].FastGetSolutionStepValue(CONV_PROJ);
                    array_1d<double,3> PressProj = rGeom[0].FastGetSolutionStepValue(PRESS_PROJ);
                    double DivProj = rGeom[0].FastGetSolutionStepValue(DIVPROJ);

                    for (unsigned int i = 1; i < 4; i++)
                    {
                        NodalArea += rGeom[i].FastGetSolutionStepValue(NODAL_AREA);
                        ConvProj += rGeom[i].FastGetSolutionStepValue(CONV_PROJ);
                        PressProj += rGeom[i].FastGetSolutionStepValue(PRESS_PROJ);
                        DivProj += rGeom[i].FastGetSolutionStepValue(DIVPROJ);
                    }

                    for (unsigned int i = 0; i < 4; i++)
                    {
                        rGeom[i].GetValue(NODAL_AREA) = NodalArea;
                        rGeom[i].GetValue(CONV_PROJ) = ConvProj;
                        rGeom[i].GetValue(PRESS_PROJ) = PressProj;
                        rGeom[i].GetValue(DIVPROJ) = DivProj;
                    }
                }
            }

            // Step 2: add the non-historical containers across processes, so every
            // partition holding a periodic node receives the combined value.
            rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);

            // Step 3: copy the combined values into the historical database of the
            // periodic nodes (NODAL_AREA != 0 identifies them) and reset the staging
            // containers for the next call.
            for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
            {
                if (itNode->GetValue(NODAL_AREA) != 0.0)
                {
                    itNode->FastGetSolutionStepValue(NODAL_AREA) = itNode->GetValue(NODAL_AREA);
                    itNode->FastGetSolutionStepValue(CONV_PROJ) = itNode->GetValue(CONV_PROJ);
                    itNode->FastGetSolutionStepValue(PRESS_PROJ) = itNode->GetValue(PRESS_PROJ);
                    itNode->FastGetSolutionStepValue(DIVPROJ) = itNode->GetValue(DIVPROJ);

                    // reset for next iteration
                    itNode->GetValue(NODAL_AREA) = 0.0;
                    itNode->GetValue(CONV_PROJ) = array_1d<double,3>(3,0.0);
                    itNode->GetValue(PRESS_PROJ) = array_1d<double,3>(3,0.0);
                    itNode->GetValue(DIVPROJ) = 0.0;
                }
            }
        }
    }

    /** Combine the velocity update (FRACT_VEL) of periodic node pairs, using the
     *  same accumulate / assemble / write-back scheme as
     *  PeriodicConditionProjectionCorrection above.
     */
    void PeriodicConditionVelocityCorrection(ModelPart& rModelPart)
    {
        if (mrPeriodicIdVar.Key() != 0)
        {
            // Global node count marks the special 4-node corner conditions (see above).
            int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size();
            rModelPart.GetCommunicator().SumAll(GlobalNodesNum);

            for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
            {
                ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
                if (rGeom.PointsNumber() == 2)
                {
                    Node<3>& rNode0 = rGeom[0];
                    int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                    Node<3>& rNode1 = rGeom[1];
                    int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);

                    // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                    if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                    {
                        array_1d<double,3> DeltaVel = rNode0.FastGetSolutionStepValue(FRACT_VEL) + rNode1.FastGetSolutionStepValue(FRACT_VEL);

                        rNode0.GetValue(FRACT_VEL) = DeltaVel;
                        rNode1.GetValue(FRACT_VEL) = DeltaVel;
                    }
                }
                else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
                {
                    array_1d<double,3> DeltaVel = rGeom[0].FastGetSolutionStepValue(FRACT_VEL);
                    for (unsigned int i = 1; i < 4; i++)
                    {
                        DeltaVel += rGeom[i].FastGetSolutionStepValue(FRACT_VEL);
                    }

                    for (unsigned int i = 0; i < 4; i++)
                    {
                        rGeom[i].GetValue(FRACT_VEL) = DeltaVel;
                    }
                }
            }

            rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL);

            // Write back on nodes that received a non-zero combined update and
            // reset the staging value for the next call.
            for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
            {
                array_1d<double,3>& rDeltaVel = itNode->GetValue(FRACT_VEL);
                if ( rDeltaVel[0]*rDeltaVel[0] + rDeltaVel[1]*rDeltaVel[1] + rDeltaVel[2]*rDeltaVel[2] != 0.0)
                {
                    itNode->FastGetSolutionStepValue(FRACT_VEL) = itNode->GetValue(FRACT_VEL);
                    rDeltaVel = array_1d<double,3>(3,0.0);
                }
            }
        }
    }

    ///@}
    ///@name Protected Access
    ///@{


    ///@}
    ///@name Protected Inquiry
    ///@{
///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxVelocityIter; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mPredictorCorrector; bool mUseSlipConditions; bool mReformDofSet; // Fractional step index. /* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; std::vector< Process::Pointer > mExtraIterationSteps; const Kratos::Variable<int>& mrPeriodicIdVar; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ void InitializeStrategy(SolverSettingsType& rSolverConfig, bool PredictorCorrector) { KRATOS_TRY; mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. 
this->Check(); ModelPart& rModelPart = this->GetModelPart(); mDomainSize = rSolverConfig.GetDomainSize(); mPredictorCorrector = PredictorCorrector; mUseSlipConditions = rSolverConfig.UseSlipConditions(); mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); } else { KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Velocity strategy defined in FractionalStepSettings",""); } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter); } else { KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Pressure strategy defined in FractionalStepSettings",""); } Process::Pointer pTurbulenceProcess; bool HaveTurbulence = rSolverConfig.GetTurbulenceModel(pTurbulenceProcess); if (HaveTurbulence) mExtraIterationSteps.push_back(pTurbulenceProcess); // Set up nodes to use slip conditions if needed. 
if (mUseSlipConditions) { #pragma omp parallel { ModelPart::ConditionIterator CondBegin; ModelPart::ConditionIterator CondEnd; OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd); for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond) { const double FlagValue = itCond->GetValue(IS_STRUCTURE); if (FlagValue != 0.0) { Condition::GeometryType& rGeom = itCond->GetGeometry(); for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i) { rGeom[i].SetLock(); rGeom[i].SetValue(IS_STRUCTURE,FlagValue); rGeom[i].UnSetLock(); } } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_STRUCTURE); } // Check input parameters this->Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. FSStrategy& operator=(FSStrategy const& rOther){} /// Copy constructor. FSStrategy(FSStrategy const& rOther){} ///@} }; /// Class FStepStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_FS_STRATEGY_H
fabio_c.c
/* Contains the IO routines for fabio module */

#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

#define FILE_MODE ( S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)
#define DIR_MODE (FILE_MODE | S_IXUSR | S_IXGRP | S_IXOTH)
#define O_BINARY 0

/* enum (not 'static const int') so the values are integer constant
   expressions and the buffers below are plain arrays, not VLAs */
enum
{
  BUFFER_SIZE = 512,
  FABIO_MAX_PATH_NAME = 512
};

/* Convert a Fortran-style integer-coded string fi (terminated by a
   negative entry) into the NUL-terminated C string f of capacity n.
   Aborts if no terminator is found within n entries. */
static void
int_2_str(char f[], int n, const int* fi)
{
  int i;
  for ( i = 0; i < n; ++i )
    {
      if ( fi[i] < 0 )
	{
	  f[i] = 0;
	  break;
	}
      f[i] = (char)fi[i];
    }
  if ( i == n )
    {
      fprintf(stderr, "name too long, probably not terminated ifilename\n");
      exit(1);
    }
}

/* Open the file named by the integer-coded string ifilename and store
   the descriptor in *fdp.  *flagp selects the mode:
     0 read-only, 1 create/truncate for writing, 2 read-write,
     3 read-write append.  Aborts on failure. */
void
fabio_open_str(int* fdp, const int* ifilename, const int* flagp)
{
  int lflag;
  int lmode = 0;		/* only used when O_CREAT is set; must not
				   be passed to open() uninitialized */
  char filename[FABIO_MAX_PATH_NAME];

  int_2_str(filename, sizeof(filename), ifilename);
  switch ( *flagp )
    {
    case 0:
      lflag = O_RDONLY;
      break;
    case 1:
      lflag = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY;
      lmode = FILE_MODE;
      break;
    case 2:
      lflag = O_RDWR;
      break;
    case 3:
      lflag = O_RDWR | O_APPEND;
      break;
    default:
      fprintf(stderr, "fabio_open_str: invalid flag, %d, must be 0 <= flag <= 3", *flagp);
      exit(1);
    }
  *fdp = open(filename, lflag, lmode);
  if ( *fdp == -1 )
    {
      fprintf(stderr, "fabio_open_str: failed to open \"%s\": %s\n",
	      filename, strerror(errno));
      exit(1);
    }
}

/*
 * DOUBLE data
 * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27
 * FLOAT data
 * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4) ))((0,0) (63,63) (0,0)) 27
 */

/*
 * NORDER_? : normal byte order floats(f), doubles(d) on this architecture
 */
static const char* str_ieee_d = "64 11 52 0 1 12 0 1023";
static const char* str_ieee_f = "32 8 23 0 1 9 0 127";

#if defined(__sgi) || \
    defined(__sun) || \
    defined(_AIX) || \
    defined(__ppc__) || \
    defined(__ppc64__) || \
    defined(_SX) || \
    defined(__hpux)
#if !defined(__LITTLE_ENDIAN__)
/* big-endian hosts: native order matches the canonical IEEE order */
static const int norder_d[8] = { 1, 2, 3, 4, 5, 6, 7, 8};
static const char* str_norder_d = "1 2 3 4 5 6 7 8";
static const int norder_f[4] = { 1, 2, 3, 4};
static const char* str_norder_f = "1 2 3 4";
#endif
#endif

#if defined(__i486__) || \
    defined(i386) || \
    defined(__i386__) || \
    defined(__x86_64) || \
    defined(__amd64__) || \
    defined(__LITTLE_ENDIAN__) || \
    defined(__powerpc__) || \
    defined(powerpc)
/* little-endian hosts: bytes are stored in reverse order */
static const int norder_d[8] = { 8, 7, 6, 5, 4, 3, 2, 1};
static const char* str_norder_d = "8 7 6 5 4 3 2 1";
static const int norder_f[4] = { 4, 3, 2, 1 };
static const char* str_norder_f = "4 3 2 1";
#endif

enum
{
  FABIO_ERR = 0,		/* cf. fabio.f90 */
  FABIO_SINGLE = 2,
  FABIO_DOUBLE = 1
};

/* Parse a FAB header line and classify its floating point format.
   The element byte order (a 1-based permutation) is stored in border[]
   (8 entries for double data, 4 for float) and FABIO_DOUBLE or
   FABIO_SINGLE is returned.  Unparseable headers abort. */
static int
scan_buffer(const char* buffer, int border[])
{
  int i;
  int bcount;
  char bstr[1024];

  /* first try for double data */
  i = sscanf(buffer, "FAB ((8, (64 11 52 0 1 12 0 1023)),(%d, (%[^)])))",
	     &bcount, bstr);
  if ( i == 2 )
    {
      i = sscanf(bstr, "%d %d %d %d %d %d %d %d",
		 border + 0, border + 1, border + 2, border + 3,
		 border + 4, border + 5, border + 6, border + 7 );
      if ( i != 8 )
	{
	  fprintf(stderr,
		  "FABIO: scan_buffer failed to parse FAB border\n"
		  "Not double precision data\n");
	  exit(1);
	}
      return FABIO_DOUBLE;
    }

  /* second, try for float data */
  i = sscanf(buffer, "FAB ((8, (32 8 23 0 1 9 0 127)),(%d, (%[^)])))",
	     &bcount, bstr);
  if ( i == 2 )
    {
      i = sscanf(bstr, "%d %d %d %d",
		 border + 0, border + 1, border + 2, border + 3 );
      if ( i != 4)
	{
	  fprintf(stderr,
		  "FABIO: scan_buffer failed to parse FAB border\n"
		  "Not single precision data\n");
	  exit(1);
	}
      return FABIO_SINGLE;
    }

  fprintf(stderr,
	  "FABIO: scan_buffer failed to parse FAB header\n"
	  "Architecture difference for floating point format\n");
  exit(1);
  return FABIO_ERR;
}

/* Read the newline-terminated FAB header at the current position of fd
   into buffer (capacity size, NUL terminated); caller prefixes error
   messages.  Aborts on read failure or overlong header. */
static void
read_fab_header(int fd, char buffer[], size_t size, const char* caller)
{
  size_t i;
  char c;
  for ( i = 0; ; i++ )
    {
      if ( read(fd, &c, 1) != 1 )
	{
	  fprintf(stderr, "%s: failed to read a char: %s\n",
		  caller, strerror(errno));
	  exit(1);
	}
      if ( c == '\n' ) break;
      /* keep one slot free for the terminating NUL (the original code
	 could write buffer[size], one past the end) */
      if ( i == size - 1 )
	{
	  fprintf(stderr, "%s: failed FAB header\n", caller);
	  exit(1);
	}
      buffer[i] = c;
    }
  buffer[i] = 0;
}

/* 1 if the first w entries of the on-disk byte order differ from the
   native order, i.e. a byte swap is required */
static int
order_differs(const int border[], const int norder[], int w)
{
  int j;
  for ( j = 0; j < w; j++ )
    {
      if ( border[j] != norder[j] ) return 1;
    }
  return 0;
}

/* Reorder the bytes of count elements of width w (4 or 8) in place,
   converting from the on-disk order border[] to the native order
   norder[] (both 1-based permutations). */
static void
reorder_bytes(unsigned char* p, size_t count, const int border[],
	      const int norder[], int w)
{
  size_t i;
  int j;
  unsigned char t[8];
  for ( i = 0; i < count; i++ )
    {
      for ( j = 0; j < w; j++ )
	{
	  t[j] = p[border[j]-1];
	}
      for ( j = 0; j < w; j++ )
	{
	  p[j] = t[norder[j]-1];
	}
      p += w;
    }
}

/* Read *countp doubles into dp from the FAB whose header starts at byte
   offset *offsetp in file *fdp, skipping *skipp leading data elements
   (used to select a component).  Handles both double and float FABs,
   byte-swapping and widening as needed.  Aborts on any error. */
void
fabio_read_skip_d(const int* fdp, const long* offsetp, const long* skipp,
		  double dp[], const long* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  off_t offset = *offsetp;
  off_t skip = *skipp;
  size_t i;
  char buffer[1024];
  int border[8];

  if ( lseek(fd, offset, SEEK_SET) < 0 )
    {
      fprintf(stderr, "fabio_read_skip_d: failed to seek to %ld: %s\n",
	      (long)offset, strerror(errno));
      exit(1);
    }
  read_fab_header(fd, buffer, sizeof(buffer), "fabio_read_skip_d");

  switch ( scan_buffer(buffer, border) )
    {
    case FABIO_DOUBLE:
      /* data on disk is double precision: read directly into dp */
      if ( skip && lseek(fd, skip*sizeof(double), SEEK_CUR) < 0 )
	{
	  fprintf(stderr, "fabio_read_skip_d: failed to seek to comp %ld: %s\n",
		  (long)offset, strerror(errno));
	  exit(1);
	}
      if ( count*sizeof(double) != (size_t)read(fd, dp, count*sizeof(double)) )
	{
	  fprintf(stderr, "fabio_read_skip_d: failed to read %ld doubles: %s\n",
		  (long)count, strerror(errno));
	  exit(1);
	}
      if ( order_differs(border, norder_d, 8) )
	{
	  reorder_bytes((unsigned char*)dp, count, border, norder_d, 8);
	}
      break;

    case FABIO_SINGLE:
      {
	/* data on disk is single precision: read into a scratch buffer
	   and widen to double */
	float* fp = (float *) malloc(count*sizeof(float));
	if ( fp == NULL )
	  {
	    fprintf(stderr, "fabio_read_skip_d: failed to allocate fp\n");
	    exit(1);
	  }
	if ( skip && lseek(fd, skip*sizeof(float), SEEK_CUR) < 0 )
	  {
	    fprintf(stderr, "fabio_read_skip_d: failed to seek to comp %ld: %s\n",
		    (long)offset, strerror(errno));
	    exit(1);
	  }
	if ( count*sizeof(float) != (size_t)read(fd, fp, count*sizeof(float)) )
	  {
	    fprintf(stderr, "fabio_read_skip_d: failed to read %ld floats: %s\n",
		    (long)count, strerror(errno));
	    exit(1);
	  }
	if ( order_differs(border, norder_f, 4) )
	  {
	    reorder_bytes((unsigned char*)fp, count, border, norder_f, 4);
	  }
	for ( i = 0; i < count; i++ )
	  {
	    dp[i] = (double)fp[i];
	  }
	free(fp);
      }
      break;
    }
}

/* Read *countp floats into sp from the FAB at offset *offsetp in *fdp,
   skipping *skipp leading elements.  Double-precision data is narrowed
   with clamping to the float range.  Aborts on any error. */
void
fabio_read_skip_s(const int* fdp, const long* offsetp, const long* skipp,
		  float sp[], const long* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  off_t offset = *offsetp;
  off_t skip = *skipp;
  size_t i;
  char buffer[1024];
  int border[8];

  if ( lseek(fd, offset, SEEK_SET) < 0 )
    {
      fprintf(stderr, "fabio_read_skip_s: failed to seek to %ld: %s\n",
	      (long)offset, strerror(errno));
      exit(1);
    }
  read_fab_header(fd, buffer, sizeof(buffer), "fabio_read_skip_s");

  switch ( scan_buffer(buffer, border) )
    {
    case FABIO_DOUBLE:
      {
	/* data on disk is double precision: read into a scratch buffer,
	   then narrow with clamping */
	double* dp = (double *) malloc(count*sizeof(double));
	if ( dp == NULL )
	  {
	    fprintf(stderr, "fabio_read_skip_s: failed to allocate dp\n");
	    exit(1);
	  }
	if ( skip && lseek(fd, skip*sizeof(double), SEEK_CUR) < 0 )
	  {
	    fprintf(stderr, "fabio_read_skip_s: failed to seek to comp %ld: %s\n",
		    (long)offset, strerror(errno));
	    exit(1);
	  }
	if ( count*sizeof(double) != (size_t)read(fd, dp, count*sizeof(double)) )
	  {
	    fprintf(stderr, "fabio_read_skip_s: failed to read %ld doubles: %s\n",
		    (long)count, strerror(errno));
	    exit(1);
	  }
	if ( order_differs(border, norder_d, 8) )
	  {
	    reorder_bytes((unsigned char*)dp, count, border, norder_d, 8);
	  }
	/* convert BEFORE releasing the scratch buffer; the previous
	   version called free(dp) first and then read dp[i] below,
	   a use after free */
	for ( i = 0; i < count; i++ )
	  {
	    if ( dp[i] > FLT_MAX )
	      sp[i] = FLT_MAX;
	    else if ( dp[i] < -FLT_MAX )
	      sp[i] = -FLT_MAX;
	    else
	      sp[i] = (float)dp[i];
	  }
	free(dp);
      }
      break;

    case FABIO_SINGLE:
      /* data on disk is single precision: read directly into sp */
      if ( skip && lseek(fd, skip*sizeof(float), SEEK_CUR) < 0 )
	{
	  fprintf(stderr, "fabio_read_skip_s: failed to seek to comp %ld: %s\n",
		  (long)offset, strerror(errno));
	  exit(1);
	}
      if ( count*sizeof(float) != (size_t)read(fd, sp, count*sizeof(float)) )
	{
	  fprintf(stderr, "fabio_read_skip_s: failed to read %ld floats: %s\n",
		  (long)count, strerror(errno));
	  exit(1);
	}
      if ( order_differs(border, norder_f, 4) )
	{
	  reorder_bytes((unsigned char*)sp, count, border, norder_f, 4);
	}
      break;
    }
}

/*
** These four guys are used by the particle code.
*/

/* Append *countp doubles to the end of file *fdp. */
void
fabio_write_raw_array_d(const int* fdp, const double* vp, const int* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  int ilen = sizeof(double) * count;

  lseek(fd, 0, SEEK_END);

  if ( ilen != write(fd, vp, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_array_d: failed to write %d bytes: %s\n",
	      ilen, strerror(errno));
      exit(1);
    }
}

/* Append *countp ints to the end of file *fdp. */
void
fabio_write_raw_array_i(const int* fdp, const int* vp, const int* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  int ilen = sizeof(int) * count;

  lseek(fd, 0, SEEK_END);

  if ( ilen != write(fd, vp, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_array_i: failed to write %d bytes: %s\n",
	      ilen, strerror(errno));
      exit(1);
    }
}

/* Read *countp doubles from the current position of *fdp. */
void
fabio_read_raw_array_d(const int* fdp, double* vp, const int* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  int ilen = sizeof(double) * count;

  if ( ilen != read(fd, vp, ilen) )
    {
      /* message previously said "failed to write" */
      fprintf(stderr, "fabio_read_raw_array_d: failed to read %d bytes: %s\n",
	      ilen, strerror(errno));
      exit(1);
    }
}

/* Read *countp ints from the current position of *fdp. */
void
fabio_read_raw_array_i(const int* fdp, int* vp, const int* countp)
{
  int fd = *fdp;
  size_t count = *countp;
  int ilen = sizeof(int) * count;

  if ( ilen != read(fd, vp, ilen) )
    {
      /* message previously said "failed to write" */
      fprintf(stderr, "fabio_read_raw_array_i: failed to read %d bytes: %s\n",
	      ilen, strerror(errno));
      exit(1);
    }
}

/* Append a FAB of *ncp components of *countp doubles each to file *fdp,
   writing the header for a dm-dimensional box [lo,hi] with nodal flags
   nd.  The byte offset of the FAB is returned in *offsetp. */
void
fabio_write_raw_d(const int* fdp, long* offsetp, const double* vp, const long* countp,
		  const int* dmp, const int lo[], const int hi[], const int nd[],
		  const int* ncp)
{
  int fd = *fdp;
  int dm = *dmp;
  int nc = *ncp;
  size_t count = *countp;
  off_t offset;
  char buffer[BUFFER_SIZE];
  size_t ilen;
  int hlen;

  /* the FAB is appended; remember where it starts */
  offset = lseek(fd, 0, SEEK_END);

  if ( snprintf(buffer, BUFFER_SIZE, "FAB ((8, (%s)),(8, (%s)))",
		str_ieee_d, str_norder_d) >= BUFFER_SIZE )
    {
      fprintf(stderr, "fabio_write_raw_d: buffer too small");
      exit(1);
    }
  ilen = strlen(buffer);
  if ( ilen != (size_t)write(fd, buffer, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_d: failed to write %d bytes: %s\n",
	      (int)ilen, strerror(errno));
      exit(1);
    }
  switch ( dm )
    {
    case 1:
      hlen = snprintf(buffer, BUFFER_SIZE, "((%d) (%d) (%d)) %d\n",
		      lo[0], hi[0], nd[0], nc);
      break;
    case 2:
      hlen = snprintf(buffer, BUFFER_SIZE, "((%d,%d) (%d,%d) (%d,%d)) %d\n",
		      lo[0], lo[1], hi[0], hi[1], nd[0], nd[1], nc);
      break;
    case 3:
      hlen = snprintf(buffer, BUFFER_SIZE,
		      "((%d,%d,%d) (%d,%d,%d) (%d,%d,%d)) %d\n",
		      lo[0], lo[1], lo[2], hi[0], hi[1], hi[2],
		      nd[0], nd[1], nd[2], nc);
      break;
    default:
      fprintf(stderr, "fabio_write_raw_d: strange dimension = %d\n", dm);
      exit(1);
    }
  if ( hlen >= BUFFER_SIZE )
    {
      fprintf(stderr, "fabio_write_raw_d: buffer too small");
      exit(1);
    }
  ilen = strlen(buffer);
  if ( ilen != (size_t)write(fd, buffer, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_d: write of buffer failed\n");
      exit(1);
    }
  ilen = nc*count*sizeof(double);
  if ( ilen != (size_t)write(fd, vp, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_d: failed to write %ld doubles: %s\n",
	      (long)nc*count, strerror(errno));
      exit(1);
    }
  if ( offset > LONG_MAX )
    {
      fprintf(stderr, "fabio_write_raw_d: offset will overflow offsetp");
      exit(1);
    }
  *offsetp = offset;
}

/* Single precision variant of fabio_write_raw_d. */
void
fabio_write_raw_s(const int* fdp, long* offsetp, const float* vp, const long* countp,
		  const int* dmp, const int lo[], const int hi[], const int nd[],
		  const int* ncp)
{
  int fd = *fdp;
  int dm = *dmp;
  int nc = *ncp;
  size_t count = *countp;
  off_t offset;
  char buffer[BUFFER_SIZE];
  size_t ilen;
  int hlen;

  offset = lseek(fd, 0, SEEK_END);

  if ( snprintf(buffer, BUFFER_SIZE, "FAB ((8, (%s)),(4, (%s)))",
		str_ieee_f, str_norder_f) >= BUFFER_SIZE )
    {
      fprintf(stderr, "fabio_write_raw_s: buffer too small");
      exit(1);
    }
  ilen = strlen(buffer);
  if ( ilen != (size_t)write(fd, buffer, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_s: failed to write %d bytes: %s\n",
	      (int)ilen, strerror(errno));
      exit(1);
    }
  switch ( dm )
    {
    case 1:
      hlen = snprintf(buffer, BUFFER_SIZE, "((%d) (%d) (%d)) %d\n",
		      lo[0], hi[0], nd[0], nc);
      break;
    case 2:
      hlen = snprintf(buffer, BUFFER_SIZE, "((%d,%d) (%d,%d) (%d,%d)) %d\n",
		      lo[0], lo[1], hi[0], hi[1], nd[0], nd[1], nc);
      break;
    case 3:
      hlen = snprintf(buffer, BUFFER_SIZE,
		      "((%d,%d,%d) (%d,%d,%d) (%d,%d,%d)) %d\n",
		      lo[0], lo[1], lo[2], hi[0], hi[1], hi[2],
		      nd[0], nd[1], nd[2], nc);
      break;
    default:
      fprintf(stderr, "fabio_write_raw_s: strange dimension = %d\n", dm);
      exit(1);
    }
  if ( hlen >= BUFFER_SIZE )
    {
      fprintf(stderr, "fabio_write_raw_s: buffer too small");
      exit(1);
    }
  ilen = strlen(buffer);
  if ( ilen != (size_t)write(fd, buffer, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_s: write of buffer failed\n");
      exit(1);
    }
  ilen = nc*count*sizeof(float);
  if ( ilen != (size_t)write(fd, vp, ilen) )
    {
      fprintf(stderr, "fabio_write_raw_s: failed to write %ld floats: %s\n",
	      (long)nc*count, strerror(errno));
      exit(1);
    }
  if ( offset > LONG_MAX )
    {
      fprintf(stderr, "fabio_write_raw_s: offset will overflow offsetp");
      exit(1);
    }
  *offsetp = offset;
}

/* Convenience wrapper: fabio_read_skip_d with no component skip. */
void
fabio_read_d(const int* fdp, const long* offsetp, double dp[], const long* countp)
{
  long skip = 0;
  fabio_read_skip_d(fdp, offsetp, &skip, dp, countp);
}

/* Convenience wrapper: fabio_read_skip_s with no component skip. */
void
fabio_read_s(const int* fdp, const long* offsetp, float sp[], const long* countp)
{
  long skip = 0;
  fabio_read_skip_s(fdp, offsetp, &skip, sp, countp);
}

/* Close descriptor *fdp; aborts on failure. */
void
fabio_close(const int* fdp)
{
  int fd = *fdp;
  if ( close(fd) < 0 )
    {
      fprintf(stderr, "fabio_close: failed to close %d: %s\n",
	      fd, strerror(errno));
      exit(1);
    }
}

/* Create the directory named by the integer-coded string idirname.
   On entry *statp selects error handling (non-zero: report failure in
   *statp; zero: abort).  On exit *statp is 0 on success, 1 on failure. */
void
fabio_mkdir_str(const int* idirname, int* statp)
{
  mode_t mode = DIR_MODE;
  int st = *statp;
  char dirname[FABIO_MAX_PATH_NAME];

  int_2_str(dirname, sizeof(dirname), idirname);
  *statp = 0;
  /* we allow the mkdir on an existing directory */
  if ( mkdir(dirname, mode) <0 && errno != EEXIST )
    {
      if ( st )
	{
	  *statp = 1;
	  return;
	}
      else
	{
	  fprintf(stderr, "fabio_mkdir_str: mkdir(%s,%d): %s\n",
		  dirname, mode, strerror(errno));
	  exit(1);
	}
    }
}

/* Delete the file named by the integer-coded string ifilename if, and
   only if, it is empty.  Aborts on any I/O failure. */
void
fabio_unlink_if_empty_str(const int* ifilename)
{
  int fd;
  char filename[FABIO_MAX_PATH_NAME];
  int lmode = FILE_MODE;
  off_t pos;			/* off_t, not int: avoid truncating large sizes */

  int_2_str(filename, sizeof(filename), ifilename);

  if ((fd = open(filename, O_RDONLY, lmode)) < 0)
    {
      fprintf(stderr, "fabio_unlink_if_empty: open() failed: \"%s\": %s\n",
	      filename, strerror(errno));
      exit(1);
    }
  if ((pos = lseek(fd, 0, SEEK_END)) < 0)
    {
      fprintf(stderr, "fabio_unlink_if_empty: lseek() failed: \"%s\": %s\n",
	      filename, strerror(errno));
      exit(1);
    }
  close(fd);

  if (pos == 0)
    {
      if (unlink(filename) < 0)
	{
	  fprintf(stderr, "fabio_unlink_if_empty: unlink() failed: \"%s\": %s\n",
		  filename, strerror(errno));
	  exit(1);
	}
    }
}

/* Set *result to 1 if any of the first *countp entries of dptr is NaN. */
void
fab_contains_nan (double dptr[], const int* countp, int* result)
{
  int i;
  int rr = 0;
  const int n = *countp;	/* loop bound must be invariant for OpenMP */

#ifdef _OPENMP
#pragma omp parallel for reduction(+:rr)
#endif
  for (i = 0; i < n; i++)
    {
      if (isnan(dptr[i])) rr++;
    }

  *result = (rr>0) ? 1 : 0;
}

/* Set *result to 1 if any of the first *countp entries of dptr is infinite. */
void
fab_contains_inf (double dptr[], const int* countp, int* result)
{
  int i;
  int rr = 0;
  const int n = *countp;

#ifdef _OPENMP
#pragma omp parallel for reduction(+:rr)
#endif
  for (i = 0; i < n; i++)
    {
      if (isinf(dptr[i])) rr++;
    }

  *result = (rr>0) ? 1 : 0;
}

/* Set *result to 1 if *val is infinite. */
void
val_is_inf (double* val, int* result)
{
  *result = (isinf(*val) ? 1 : 0);
}

/* Set *result to 1 if *val is NaN. */
void
val_is_nan (double* val, int* result)
{
  *result = (isnan(*val) ? 1 : 0);
}
matrix_multiply_omp.c
/* --- File matrix_multiply_omp.c --- */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> int main(int argc, char **argv) { struct timespec ts_start, ts_end; int size = 1e4; int **a, *c; int i, j; float time_total; /* Allocate memory */ c = malloc(size * sizeof(int)); a = (int **)malloc(size * sizeof(int *)); for (i = 0; i < size; i++) a[i] = malloc(size * sizeof(int)); /* Set all matrix elements to 1 */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a[i][j] = 1; } } /* Zero the accumulator */ for (i = 0; i < size; i++) { c[i] = 0; } clock_gettime(CLOCK_MONOTONIC, &ts_start); #pragma omp parallel for /* Each thread sums one column */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { c[i] += a[i][j]; } } int total = 0; /* Add sums of all columns together */ for (i = 0; i < size; i++) { total += c[i]; } clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("Total is %d, time is %f ms\n", total, time_total / 1e6); }
nested_parallel_tasking.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <omp.h> /* * This test would hang when level instead of active level * used to push task state. */ int main() { // If num_threads is changed to a value greater than 1, then the test passes #pragma omp parallel num_threads(1) { #pragma omp parallel printf("Hello World from thread %d\n", omp_get_thread_num()); } printf("omp_num_threads: %d\n", omp_get_max_threads()); #pragma omp parallel { #pragma omp master #pragma omp task default(none) { printf("%d is executing this task\n", omp_get_thread_num()); } } printf("pass\n"); return 0; }
GB_binop__le_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__le_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__le_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint32) // A*D function (colscale): GB (_AxD__le_uint32) // D*A function (rowscale): GB (_DxB__le_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint32) // C=scalar+B GB (_bind1st__le_uint32) // C=scalar+B' GB (_bind1st_tran__le_uint32) // C=A+scalar GB (_bind2nd__le_uint32) // C=A'+scalar GB (_bind2nd_tran__le_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT32 || GxB_NO_LE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__le_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_uint32) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dropout_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

// CPU forward and backward kernels for the dropout operator.
//
// Two mask conventions are supported via the "dropout_implementation" attr:
//   - "downgrade_in_infer" (default): train keeps surviving values as-is,
//     inference scales the whole input by (1 - dropout_prob).
//   - "upscale_in_train": train divides surviving values by
//     (1 - dropout_prob), inference is a plain copy.

#pragma once
#include <cstring>
#include <random>
#include <string>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

// Forward dropout on CPU.
// Inputs:  X (tensor).  Outputs: Out (same shape), and Mask (uint8_t,
// written only when is_test is false; 0 = dropped, 1 = kept).
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* y = context.Output<Tensor>("Out");
    const auto* x_data = x->data<T>();
    auto* y_data = y->mutable_data<T>(context.GetPlace());
    float dropout_prob = context.Attr<float>("dropout_prob");

    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    bool upscale_in_train = (dropout_implementation == "upscale_in_train");
    if (!context.Attr<bool>("is_test")) {
      // Training path: draw one uniform sample per element and record the
      // keep/drop decision in Mask.
      auto* mask = context.Output<Tensor>("Mask");
      auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
      size_t size = framework::product(mask->dims());

      // Special case when dropout_prob is 1.0: everything is dropped, so
      // zero both output and mask without consuming any random numbers.
      if (dropout_prob == 1.0f) {
        std::memset(y_data, 0, size * sizeof(*y_data));        // NOLINT
        std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
        return;
      }

      // NOTE: fixed seed should only be used in unittest or for debug.
      // Guarantee to use random seed in training.
      std::random_device rnd;
      std::minstd_rand engine;
      int seed =
          context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
      engine.seed(seed);

      std::uniform_real_distribution<float> dist(0, 1);

      // One RNG draw per element; the draw order defines the mask, so this
      // loop must stay sequential.
      for (size_t i = 0; i < size; ++i) {
        if (dist(engine) < dropout_prob) {
          mask_data[i] = 0;
          y_data[i] = 0;
        } else {
          mask_data[i] = 1;
          if (upscale_in_train) {
            // Scale survivors up so the expected activation matches test time.
            y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
          } else {
            y_data[i] = x_data[i];
          }
        }
      }
    } else {
      // Inference path: no randomness, no Mask output.
      if (upscale_in_train) {
        // upscale_in_train already compensated during training: plain copy.
        const auto* X_data = x->data<T>();
        auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
        for (int i = 0; i < x->numel(); i++) {
          Y_data[i] = X_data[i];
        }
      } else {
        // downgrade_in_infer: scale the whole input by the keep probability.
        auto X = EigenMatrix<T>::Reshape(*x, 1);
        auto Y = EigenMatrix<T>::Reshape(*y, 1);
        auto& place =
            *context.template device_context<DeviceContext>().eigen_device();
        Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
      }
    }
  }
};

// Backward dropout on CPU: dX = dY * Mask, rescaled for upscale_in_train.
// Only valid in training mode (Mask does not exist at inference).
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    PADDLE_ENFORCE(!context.Attr<bool>("is_test"),
                   "GradOp is only callable when is_test is false");

    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* mask = context.Input<Tensor>("Mask");
    grad_x->mutable_data<T>(context.GetPlace());

    auto M = EigenMatrix<uint8_t>::Reshape(*mask, 1);
    auto dX = EigenMatrix<T>::Reshape(*grad_x, 1);
    auto dY = EigenMatrix<T>::Reshape(*grad_y, 1);

    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    if (dropout_implementation == "upscale_in_train") {
      float dropout_prob = context.Attr<float>("dropout_prob");
      if (dropout_prob == 1.0f) {
        // Everything was dropped: gradient is identically zero (avoids the
        // division by (1 - 1.0) below).
        dX.device(place) = static_cast<T>(0) * dY;
      } else {
        // Mirror the forward scaling on the surviving elements.
        dX.device(place) =
            dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
      }
    } else {
      dX.device(place) = dY * M.cast<T>();
    }
  }
};

}  // namespace operators
}  // namespace paddle
serial_tree_learner.h
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>

#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"

#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

using namespace json11;

namespace LightGBM {

/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  /*! \brief Bind the learner to a training dataset (declaration only; see .cpp) */
  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Replace the training dataset without rebuilding the learner */
  void ResetTrainingData(const Dataset* train_data) override;

  /*! \brief Apply a new configuration */
  void ResetConfig(const Config* config) override;

  /*! \brief Learn one tree from per-example gradients/hessians; forced_split_json
  *         may pre-specify splits (see ForceSplits) */
  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian, Json& forced_split_json) override;

  /*! \brief Re-fit leaf outputs of an existing tree structure with new gradients/hessians */
  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) override;

  /*! \brief Restrict training to the given bagged subset of the data */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  /*! \brief Add each leaf's output to the score of every data point in that
  *         leaf; leaves are processed in parallel (disjoint index sets, so
  *         the writes to out_score do not overlap) */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  /*! \brief Recompute leaf outputs using the objective function (declaration only) */
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

 protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  /*! \brief Find the best split for the current smaller/larger leaves */
  virtual void FindBestSplits();

  /*! \brief Build feature histograms; use_subtract enables the
  *          parent-minus-sibling histogram trick */
  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*! \brief Scan the constructed histograms for the best split per leaf */
  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf, int* right_leaf,
                              int* cur_depth, bool *aborted_last_force_split);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief indices of features eligible for splitting
  *          (NOTE(review): exact population semantics live in the .cpp) */
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief number of threads used for training */
  int num_threads_;
  /*! \brief indices into ordered_bins_ of the features that have an ordered bin */
  std::vector<int> ordered_bin_indices_;
  /*! \brief true if the hessian is constant for this objective */
  bool is_constant_hessian_;
};

// Global (pre-bagging) data count of a leaf; negative indices denote
// non-existent leaves and report zero.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
hash_table.h
#ifndef HASH_TABLE_H_ #define HASH_TABLE_H_ #include "global_parameters.h" struct smer_detail{ int pro_id; int sta_pos; //int end_pos; }; struct hash_entry{ uint64_t smer; vector <smer_detail> smer_occs; }; class HASH_TABLE{ public: string seed; uint32_t seed_len; uint64_t seed_64; uint64_t ht_size; hash_entry * hash_table; void creat_hash_table(string seed); //initialize the hash table HASH_TABLE(); void get_hash_size(); // based on the seed length, get the intial size of the hash table void convert_seed(); void encode_pro(); // encode protein, and based on seed, convert it to smers void insert_smer_to_ht(uint64_t new_smer, int sta, int pro_id); //add smer to the hash table uint64_t get_ht_index(uint64_t ind, uint64_t smer); uint64_t search_in_ht(uint64_t index, uint64_t target_smer); }; void HASH_TABLE::get_hash_size(){ seed_len = seed.length(); ht_size = 0; for(boost::unordered_map<int, string> :: iterator it = p_id_seq.begin(); it != p_id_seq.end(); it ++){ ht_size = (it->second).length() + ht_size - seed_len + 1; } // select from the large prime integer list for(int a = 0; a < 440; a++) { if(hashTableSizes[a] > (2 * ht_size)) { ht_size = hashTableSizes[a]; break; } } } HASH_TABLE :: HASH_TABLE(){ } void HASH_TABLE :: creat_hash_table(string input_seed){ #ifdef PARAL #pragma omp critical(writeFile) #endif seed = input_seed; this->get_hash_size(); this->convert_seed(); hash_table = (hash_entry *) malloc(sizeof(hash_entry) * ht_size); for(uint64_t a = 0; a < ht_size; a ++){ //initialization of hash_table, set the smer as 1090715534754863 as default, since no smer would have 25 digit 1 hash_table[a].smer = hash_table_default; } this->encode_pro(); //counting the number of useful entries in the hashtable int useful_hash_entry = 0; for(uint64_t a = 0; a < ht_size; a ++){ //initialization of hash_table, set the smer as 1090715534754863 as default, since no smer would have 25 digit 1 if(hash_table[a].smer != hash_table_default){ useful_hash_entry ++; } } } void 
HASH_TABLE :: convert_seed(){ seed_64 = 0; for(uint32_t a = 0; a < seed.length(); a ++){ if(seed[a] == '1'){ //1, add 11111 seed_64 = (seed_64 << 5); seed_64 = (seed_64 | 31); } else{ // 0, add 00000 seed_64 = (seed_64 << 5); } } } void HASH_TABLE :: encode_pro(){ uint64_t temp_smer; //smer for each seed_length sequence uint64_t temp_seq64; //store the seed_length sequence int num_insertion = 0; for(boost::unordered_map <int, string> :: iterator it = p_id_seq.begin(); it != p_id_seq.end(); it ++){ temp_seq64 = 0; temp_smer = 0; for(uint32_t i = 0; i < (it->second).length(); i ++){ temp_seq64 = (temp_seq64 << 5); switch( (it->second).at(i)){ case 'a': case 'A': temp_seq64 = (temp_seq64 | 0); break; case 'r': case 'R': temp_seq64 = (temp_seq64 | 1); break; case 'n': case 'N': temp_seq64 = (temp_seq64 | 2); break; case 'd': case 'D': temp_seq64 = (temp_seq64 | 3); break; case 'c': case 'C': temp_seq64 = (temp_seq64 | 4); break; case 'q': case 'Q': temp_seq64 = (temp_seq64 | 5); break; case 'e': case 'E': temp_seq64 = (temp_seq64 | 6); break; case 'g': case 'G': temp_seq64 = (temp_seq64 | 7); break; case 'h': case 'H': temp_seq64 = (temp_seq64 | 8); break; case 'i': case 'I': temp_seq64 = (temp_seq64 | 9); break; case 'l': case 'L': temp_seq64 = (temp_seq64 | 10); break; case 'k': case 'K': temp_seq64 = (temp_seq64 | 11); break; case 'm': case 'M': temp_seq64 = (temp_seq64 | 12); break; case 'f': case 'F': temp_seq64 = (temp_seq64 | 13); break; case 'p': case 'P': temp_seq64 = (temp_seq64 | 14); break; case 's': case 'S': temp_seq64 = (temp_seq64 | 15); break; case 't': case 'T': temp_seq64 = (temp_seq64 | 16); break; case 'w': case 'W': temp_seq64 = (temp_seq64 | 17); break; case 'y': case 'Y': temp_seq64 = (temp_seq64 | 18); break; case 'v': case 'V': temp_seq64 = (temp_seq64 | 19); break; case 'b': case 'B': case 'z': case 'Z': case 'x': case 'X': case 'u': case 'U': case 'o': case 'O': temp_seq64 = (temp_seq64 | 20); break; default: temp_seq64 = (temp_seq64 | 31); 
//letters which other than 25 amino acids, will be encoded by a largest 5-digit number cout<<"Sequence: "<<it->second<<endl; cout<<"protein sequences contains letter(s) other than the 25 Amino Acids(including U and O) as shown here: http://blast.advbiocomp.com/blast-1.3/blast/PAM120"<<endl; cout<<"P_id: "<<it->first<<endl; cout<<"position: "<<i<<endl; exit(14); break; } temp_smer = (temp_seq64 & seed_64); if(i >= (seed_len - 1)){ num_insertion ++; insert_smer_to_ht(temp_smer, i-seed_len+1, it->first); } } } } void HASH_TABLE :: insert_smer_to_ht(uint64_t new_smer, int sta, int pro_id){ uint64_t temp_index = get_ht_index(new_smer % ht_size, new_smer); hash_table[temp_index].smer = new_smer; smer_detail temp_occ; temp_occ.pro_id = pro_id; temp_occ.sta_pos = sta; hash_table[temp_index].smer_occs.push_back(temp_occ); } uint64_t HASH_TABLE :: get_ht_index(uint64_t ind, uint64_t smer){ if(hash_table[ind].smer == hash_table_default) return ind; if(hash_table[ind].smer == smer) return ind; else return get_ht_index((ind + 1) % ht_size, smer); } uint64_t HASH_TABLE :: search_in_ht(uint64_t index, uint64_t target_smer){ if(hash_table[index].smer == hash_table_default){ return hash_table_default; } else if(hash_table[index].smer == target_smer){ return index; } else{ return search_in_ht((index + 1)%ht_size, target_smer); } } #endif /* HASH_TABLE_H_ */
main.c
/* Heat equation solver in 2D. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "heat.h" #include <omp.h> int main(int argc, char **argv) { double a = 0.5; //!< Diffusion constant field current, previous; //!< Current and previous temperature fields double dt; //!< Time step int nsteps; //!< Number of time steps int image_interval = 1000; //!< Image output interval int iter; //!< Iteration counter double dx2, dy2; //!< delta x and y squared double start_clock, stop_clock; //!< Time stamps #pragma omp parallel private(iter) { initialize(argc, argv, &current, &previous, &nsteps); #pragma omp single { /* Output the initial field */ write_field(&current, 0); /* Largest stable time step */ dx2 = current.dx * current.dx; dy2 = current.dy * current.dy; dt = dx2 * dy2 / (2.0 * a * (dx2 + dy2)); /* Get the start time stamp */ start_clock = omp_get_wtime(); } /* Time evolve */ for (iter = 1; iter <= nsteps; iter++) { evolve(&current, &previous, a, dt); if (iter % image_interval == 0) { #pragma omp single write_field(&current, iter); } /* Swap current field so that it will be used as previous for next iteration step */ #pragma omp single swap_fields(&current, &previous); } } /* End of parallel region */ stop_clock = omp_get_wtime(); /* Determine the CPU time used for the iteration */ printf("Iteration took %.3f seconds.\n", stop_clock - start_clock); printf("Reference value at 5,5: %f\n", previous.data[5][5]); finalize(&current, &previous); return 0; }
CLHelper.h
//------------------------------------------ //--cambine:helper function for OpenCL //--programmer: Jianbin Fang //--date: 27/12/2010 //------------------------------------------ #ifndef _CL_HELPER_ #define _CL_HELPER_ #include <CL/cl.h> #include <vector> #include <iostream> #include <fstream> #include <string> #ifdef TIMING #include "timing.h" #endif using std::string; using std::ifstream; using std::cerr; using std::endl; using std::cout; //#pragma OPENCL EXTENSION cl_nv_compiler_options:enable #define WORK_DIM 2 //work-items dimensions extern float init_time, mem_alloc_time, h2d_time, kernel_time, d2h_time, close_time, total_time; struct oclHandleStruct { cl_context context; cl_device_id *devices; cl_command_queue queue; cl_program program; cl_int cl_status; std::string error_str; std::vector<cl_kernel> kernel; }; struct oclHandleStruct oclHandles; char kernel_file[100] = "Kernels.cl"; int total_kernels = 2; string kernel_names[2] = {"BFS_1", "BFS_2"}; int work_group_size = 512; int platform_id_inuse = 0; // platform id in use (default: 0) int device_id_inuse = 0; //device id in use (default : 0) cl_device_type device_type = CL_DEVICE_TYPE_GPU; /* * Converts the contents of a file into a string */ string FileToString(const string fileName) { ifstream f(fileName.c_str(), ifstream::in | ifstream::binary); try { size_t size; char* str; string s; if(f.is_open()) { size_t fileSize; f.seekg(0, ifstream::end); size = fileSize = f.tellg(); f.seekg(0, ifstream::beg); str = new char[size+1]; if (!str) throw(string("Could not allocate memory")); f.read(str, fileSize); f.close(); str[size] = '\0'; s = str; delete [] str; return s; } } catch(std::string msg) { cerr << "Exception caught in FileToString(): " << msg << endl; if(f.is_open()) f.close(); } catch(...) 
{ cerr << "Exception caught in FileToString()" << endl; if(f.is_open()) f.close(); } string errorMsg = "FileToString()::Error: Unable to open file " + fileName; throw(errorMsg); } //--------------------------------------- //Read command line parameters // void _clCmdParams(int argc, char* argv[]) { for (int i =0; i < argc; ++i) { switch (argv[i][1]) { case 'g': //--g stands for size of work group if (++i < argc) { sscanf(argv[i], "%u", &work_group_size); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; case 'd': //--d stands for device id used in computaion if (++i < argc) { sscanf(argv[i], "%u", &device_id_inuse); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; case 'p': // --p stands for platform id used in computation if (++i < argc) { sscanf(argv[i], "%u", &platform_id_inuse); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; /* case 't': // --t stands for device type, 0:GPU, 1:CPU if (++i < argc) { sscanf(argv[i], "%u", &device_type); device_type = (device_type == 0) ? 
CL_DEVICE_TYPE_GPU : CL_DEVICE_TYPE_CPU; } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; */ default: ; } } } //--------------------------------------- //Initlize CL objects //--description: there are 5 steps to initialize all the OpenCL objects needed //--revised on 04/01/2011: get the number of devices and // devices have no relationship with context void _clInit() { cl_int resultCL; oclHandles.context = NULL; oclHandles.devices = NULL; oclHandles.queue = NULL; oclHandles.program = NULL; cl_uint deviceListSize; //----------------------------------------------- //--cambine-1: find the available platforms and select one cl_uint numPlatforms; cl_platform_id targetPlatform = NULL; resultCL = clGetPlatformIDs(0, NULL, &numPlatforms); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting number of platforms (clGetPlatformIDs)")); printf("number of platforms:%d\n",numPlatforms); //by cambine if (!(numPlatforms > 0)) throw (string("InitCL()::Error: No platforms found (clGetPlatformIDs)")); cl_platform_id* allPlatforms = (cl_platform_id*) malloc(numPlatforms * sizeof(cl_platform_id)); resultCL = clGetPlatformIDs(numPlatforms, allPlatforms, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform ids (clGetPlatformIDs)")); for (int i = 0; i < numPlatforms; i++) { char pbuff[128]; resultCL = clGetPlatformInfo( allPlatforms[i], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform info (clGetPlatformInfo)")); printf("vendor is %s\n",pbuff); } /* Select the target platform. 
Default: first platform */ targetPlatform = allPlatforms[platform_id_inuse]; free(allPlatforms); //----------------------------------------------- //--cambine-3: detect OpenCL devices /* First, get the size of device list */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_ALL, 0, NULL, &deviceListSize); if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("exception in _clInit -> clGetDeviceIDs")); } if (deviceListSize == 0) throw(string("InitCL()::Error: No devices found.")); std::cout << "device number: " << deviceListSize<<std::endl; /* Now, allocate the device list */ oclHandles.devices = (cl_device_id *)malloc(deviceListSize * sizeof(cl_device_id)); if (oclHandles.devices == 0) throw(string("InitCL()::Error: Could not allocate memory.")); /* Next, get the device list data */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_ALL, deviceListSize, oclHandles.devices, NULL); if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("exception in _clInit -> clGetDeviceIDs-2")); } /* Then, get device type */ oclHandles.cl_status = clGetDeviceInfo(oclHandles.devices[device_id_inuse], CL_DEVICE_TYPE, sizeof(cl_device_type), (void *)&device_type, NULL); if (oclHandles.cl_status != CL_SUCCESS) { throw(string("error in Getting Device Info")); } if (device_type == CL_DEVICE_TYPE_GPU) printf("Creating GPU Context\n"); else if (device_type == CL_DEVICE_TYPE_CPU) printf("Creating CPU Context\n"); else throw(string("unsupported device type")); //----------------------------------------------- //--cambine-2: create an OpenCL context cl_context_properties cprops[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)targetPlatform, 0 }; oclHandles.context = clCreateContextFromType(cprops, device_type, NULL, NULL, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL)) throw (string("InitCL()::Error: Creating Context (clCreateContextFromType)")); //----------------------------------------------- //--cambine-4: Create an OpenCL 
command queue #ifdef TIMING oclHandles.queue = clCreateCommandQueue(oclHandles.context, oclHandles.devices[device_id_inuse], CL_QUEUE_PROFILING_ENABLE, &resultCL); #else oclHandles.queue = clCreateCommandQueue(oclHandles.context, oclHandles.devices[device_id_inuse], 0, &resultCL); #endif if ((resultCL != CL_SUCCESS) || (oclHandles.queue == NULL)) throw(string("InitCL()::Creating Command Queue. (clCreateCommandQueue)")); //----------------------------------------------- //--cambine-5: Load CL file, build CL program object, create CL kernel object std::string source_str = FileToString(kernel_file); const char * source = source_str.c_str(); size_t sourceSize[] = { source_str.length() }; oclHandles.program = clCreateProgramWithSource(oclHandles.context, 1, &source, sourceSize, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) throw(string("InitCL()::Error: Loading Binary into cl_program. (clCreateProgramWithBinary)")); //insert debug information //std::string options= "-cl-nv-verbose"; //Doesn't work on AMD machines //options += " -cl-nv-opt-level=3"; resultCL = clBuildProgram(oclHandles.program, deviceListSize, oclHandles.devices, NULL, NULL,NULL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) { cerr << "InitCL()::Error: In clBuildProgram" << endl; size_t length; resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[device_id_inuse], CL_PROGRAM_BUILD_LOG, 0, NULL, &length); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); char* buffer = (char*)malloc(length); resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[device_id_inuse], CL_PROGRAM_BUILD_LOG, length, buffer, NULL); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); cerr << buffer << endl; free(buffer); throw(string("InitCL()::Error: Building Program (clBuildProgram)")); } //get program information in intermediate 
representation #ifdef PTX_MSG size_t binary_sizes[deviceListSize]; char * binaries[deviceListSize]; //figure out number of devices and the sizes of the binary for each device. oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t)*deviceListSize, &binary_sizes, NULL ); if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-2")); } std::cout<<"--cambine:"<<binary_sizes<<std::endl; //copy over all of the generated binaries. for(int i=0; i<deviceListSize; i++) binaries[i] = (char *)malloc( sizeof(char)*(binary_sizes[i]+1)); oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARIES, sizeof(char *)*deviceListSize, binaries, NULL ); if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-3")); } for(int i=0; i<deviceListSize; i++) binaries[i][binary_sizes[i]] = '\0'; std::cout<<"--cambine:writing ptd information..."<<std::endl; FILE * ptx_file = fopen("cl.ptx","w"); if(ptx_file==NULL) { throw(string("exceptions in allocate ptx file.")); } fprintf(ptx_file,"%s",binaries[DEVICE_ID_INUSE]); fclose(ptx_file); std::cout<<"--cambine:writing ptd information done."<<std::endl; for(int i=0; i<deviceListSize; i++) free(binaries[i]); #endif for (int nKernel = 0; nKernel < total_kernels; nKernel++) { /* get a kernel object handle for a kernel with the given name */ cl_kernel kernel = clCreateKernel(oclHandles.program, (kernel_names[nKernel]).c_str(), &resultCL); if ((resultCL != CL_SUCCESS) || (kernel == NULL)) { string errorMsg = "InitCL()::Error: Creating Kernel (clCreateKernel) \"" + kernel_names[nKernel] + "\""; throw(errorMsg); } oclHandles.kernel.push_back(kernel); } //get resource alocation information #ifdef RES_MSG char * build_log; size_t ret_val_size; oclHandles.cl_status = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSE], CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size); 
if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("exceptions in _InitCL -> getting resource information")); } build_log = (char *)malloc(ret_val_size+1); oclHandles.cl_status = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSE], CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL); if(oclHandles.cl_status!=CL_SUCCESS) { throw(string("exceptions in _InitCL -> getting resources allocation information-2")); } build_log[ret_val_size] = '\0'; std::cout<<"--cambine:"<<build_log<<std::endl; free(build_log); #endif } //--------------------------------------- //release CL objects void _clRelease() { char errorFlag = false; for (int nKernel = 0; nKernel < oclHandles.kernel.size(); nKernel++) { if (oclHandles.kernel[nKernel] != NULL) { cl_int resultCL = clReleaseKernel(oclHandles.kernel[nKernel]); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseKernel" << endl; errorFlag = true; } oclHandles.kernel[nKernel] = NULL; } oclHandles.kernel.clear(); } if (oclHandles.program != NULL) { cl_int resultCL = clReleaseProgram(oclHandles.program); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseProgram" << endl; errorFlag = true; } oclHandles.program = NULL; } if (oclHandles.queue != NULL) { cl_int resultCL = clReleaseCommandQueue(oclHandles.queue); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseCommandQueue" << endl; errorFlag = true; } oclHandles.queue = NULL; } free(oclHandles.devices); if (oclHandles.context != NULL) { cl_int resultCL = clReleaseContext(oclHandles.context); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseContext" << endl; errorFlag = true; } oclHandles.context = NULL; } if (errorFlag) throw(string("ReleaseCL()::Error encountered.")); } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device cl_mem _clCreateAndCpyMem(int size, void * h_mem_source) throw(string) { cl_mem d_mem; d_mem 
= clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, \ size, h_mem_source, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem()")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read only buffer for devices //--date: 17/01/2011 cl_mem _clMallocRW(int size, void * h_mem_ptr) throw(string) { cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_WRITE, size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMallocRW")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read and write buffer for devices //--date: 17/01/2011 cl_mem _clMalloc(int size, void * h_mem_ptr) throw(string) { cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY, size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMalloc")); #endif return d_mem; } //------------------------------------------------------- //--cambine: transfer data from host to device //--date: 17/01/2011 void _clMemcpyH2D(cl_mem d_mem, int size, const void *h_mem_ptr) throw(string) { cl_event event; oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem_ptr, 0, NULL, &event); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMemcpyH2D")); #endif #ifdef TIMING h2d_time += probe_event_time(event, oclHandles.queue); #endif } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device with pinned // memory cl_mem _clCreateAndCpyPinnedMem(int size, float* h_mem_source) throw(string) { cl_mem d_mem, d_mem_pinned; float * h_mem_pinned = NULL; d_mem_pinned = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, \ size, 
NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem()->d_mem_pinned")); #endif //------------ d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY, \ size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> d_mem ")); #endif //---------- h_mem_pinned = (cl_float *)clEnqueueMapBuffer(oclHandles.queue, d_mem_pinned, CL_TRUE, \ CL_MAP_WRITE, 0, size, 0, NULL, \ NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueMapBuffer")); #endif int element_number = size/sizeof(float); #pragma omp parallel for for(int i=0; i<element_number; i++) { h_mem_pinned[i] = h_mem_source[i]; } //---------- oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem, \ CL_TRUE, 0, size, h_mem_pinned, \ 0, NULL, NULL); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueWriteBuffer")); #endif return d_mem; } //-------------------------------------------------------- //--cambine:create write only buffer on device cl_mem _clMallocWO(int size) throw(string) { cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY, size, 0, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateMem()")); #endif return d_mem; } //-------------------------------------------------------- //transfer data from device to host void _clMemcpyD2H(cl_mem d_mem, int size, void * h_mem) throw(string) { cl_event event; oclHandles.cl_status = clEnqueueReadBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem, 0,0, &event); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clCpyMemD2H -> "; switch(oclHandles.cl_status) { case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case 
CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_VALUE: oclHandles.error_str += "CL_INVALID_VALUE"; break; case CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif #ifdef TIMING d2h_time += probe_event_time(event, oclHandles.queue); #endif } //-------------------------------------------------------- //set kernel arguments void _clSetArgs(int kernel_id, int arg_idx, void * d_mem, int size = 0) throw(string) { if(!size) { oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, sizeof(d_mem), &d_mem); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clSetKernelArg() "; switch(oclHandles.cl_status) { case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_ARG_INDEX: oclHandles.error_str += "CL_INVALID_ARG_INDEX"; break; case CL_INVALID_ARG_VALUE: oclHandles.error_str += "CL_INVALID_ARG_VALUE"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_SAMPLER: oclHandles.error_str += "CL_INVALID_SAMPLER"; break; case CL_INVALID_ARG_SIZE: oclHandles.error_str += "CL_INVALID_ARG_SIZE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif } else { oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, size, d_mem); 
#ifdef ERRMSG oclHandles.error_str = "excpetion in _clSetKernelArg() "; switch(oclHandles.cl_status) { case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_ARG_INDEX: oclHandles.error_str += "CL_INVALID_ARG_INDEX"; break; case CL_INVALID_ARG_VALUE: oclHandles.error_str += "CL_INVALID_ARG_VALUE"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_SAMPLER: oclHandles.error_str += "CL_INVALID_SAMPLER"; break; case CL_INVALID_ARG_SIZE: oclHandles.error_str += "CL_INVALID_ARG_SIZE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif } } void _clFinish() throw(string) { oclHandles.cl_status = clFinish(oclHandles.queue); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clFinish"; switch(oclHandles.cl_status) { case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reasons"; break; } if(oclHandles.cl_status!=CL_SUCCESS) { throw(oclHandles.error_str); } #endif } //-------------------------------------------------------- //--cambine:enqueue kernel void _clInvokeKernel(int kernel_id, int work_items, int work_group_size) throw(string) { cl_uint work_dim = WORK_DIM; cl_event e[1]; if(work_items%work_group_size != 0) //process situations that work_items cannot be divided by work_group_size work_items = work_items + (work_group_size-(work_items%work_group_size)); size_t local_work_size[] = {work_group_size, 1}; size_t global_work_size[] = {work_items, 1}; oclHandles.cl_status = 
clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0, \ global_work_size, local_work_size, 0, 0, &(e[0]) ); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clInvokeKernel() -> "; switch(oclHandles.cl_status) { case CL_INVALID_PROGRAM_EXECUTABLE: oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE"; break; case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_KERNEL_ARGS: oclHandles.error_str += "CL_INVALID_KERNEL_ARGS"; break; case CL_INVALID_WORK_DIMENSION: oclHandles.error_str += "CL_INVALID_WORK_DIMENSION"; break; case CL_INVALID_GLOBAL_WORK_SIZE: oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE"; break; case CL_INVALID_WORK_GROUP_SIZE: oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE"; break; case CL_INVALID_WORK_ITEM_SIZE: oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE"; break; case CL_INVALID_GLOBAL_OFFSET: oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif #ifdef TIMING kernel_time += probe_event_time(e[0], oclHandles.queue); #endif //_clFinish(); // oclHandles.cl_status = clWaitForEvents(1, &e[0]); // #ifdef ERRMSG // if (oclHandles.cl_status!= CL_SUCCESS) // throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents")); // #endif } void _clInvokeKernel2D(int kernel_id, int range_x, int range_y, int 
group_x, int group_y) throw(string) { cl_uint work_dim = WORK_DIM; size_t local_work_size[] = {group_x, group_y}; size_t global_work_size[] = {range_x, range_y}; cl_event e[1]; /*if(work_items%work_group_size != 0) //process situations that work_items cannot be divided by work_group_size work_items = work_items + (work_group_size-(work_items%work_group_size));*/ oclHandles.cl_status = clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0, \ global_work_size, local_work_size, 0, 0, &(e[0]) ); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clInvokeKernel() -> "; switch(oclHandles.cl_status) { case CL_INVALID_PROGRAM_EXECUTABLE: oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE"; break; case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_KERNEL_ARGS: oclHandles.error_str += "CL_INVALID_KERNEL_ARGS"; break; case CL_INVALID_WORK_DIMENSION: oclHandles.error_str += "CL_INVALID_WORK_DIMENSION"; break; case CL_INVALID_GLOBAL_WORK_SIZE: oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE"; break; case CL_INVALID_WORK_GROUP_SIZE: oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE"; break; case CL_INVALID_WORK_ITEM_SIZE: oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE"; break; case CL_INVALID_GLOBAL_OFFSET: oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if(oclHandles.cl_status != CL_SUCCESS) 
throw(oclHandles.error_str); #endif #ifdef TIMING kernel_time += probe_event_time(e[0], oclHandles.queue); #endif //_clFinish(); /*oclHandles.cl_status = clWaitForEvents(1, &e[0]); #ifdef ERRMSG if (oclHandles.cl_status!= CL_SUCCESS) throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents")); #endif*/ } //-------------------------------------------------------- //release OpenCL objects void _clFree(cl_mem ob) throw(string) { if(ob!=NULL) oclHandles.cl_status = clReleaseMemObject(ob); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clFree() ->"; switch(oclHandles.cl_status) { case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if (oclHandles.cl_status!= CL_SUCCESS) throw(oclHandles.error_str); #endif } #endif //_CL_HELPER_
ams.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "float.h" #include "ams.h" #include "_hypre_utilities.hpp" /*-------------------------------------------------------------------------- * hypre_ParCSRRelax * * Relaxation on the ParCSR matrix A with right-hand side f and * initial guess u. Possible values for relax_type are: * * 1 = l1-scaled (or weighted) Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR * 3 = Kaczmarz * 4 = truncated version of 2 (Remark 6.2 in smoothers paper) * x = BoomerAMG relaxation with relax_type = |x| * (16 = Cheby) * * The default value of relax_type is 2. *--------------------------------------------------------------------------*/ #if defined(HYPRE_USING_CUDA) struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex> { __host__ __device__ HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const { return x <= 4.0/3.0 * y ? 
y : x; } }; #endif HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */ hypre_ParCSRMatrix *A, /* right-hand side */ hypre_ParVector *f, /* relaxation type */ HYPRE_Int relax_type, /* number of sweeps */ HYPRE_Int relax_times, /* l1 norms of the rows of A */ HYPRE_Real *l1_norms, /* damping coefficient (usually <= 1) */ HYPRE_Real relax_weight, /* SOR parameter (usually in (0,2) */ HYPRE_Real omega, /* for cheby smoothers */ HYPRE_Real max_eig_est, HYPRE_Real min_eig_est, HYPRE_Int cheby_order, HYPRE_Real cheby_fraction, /* initial/updated approximation */ hypre_ParVector *u, /* temporary vector */ hypre_ParVector *v, /* temporary vector */ hypre_ParVector *z) { HYPRE_Int sweep; HYPRE_Complex *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u)); HYPRE_Complex *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f)); HYPRE_Complex *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v)); for (sweep = 0; sweep < relax_times; sweep++) { if (relax_type == 1) /* l1-scaled Jacobi */ { HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int sync_stream = hypre_HandleCudaComputeStreamSync(hypre_handle()); hypre_HandleCudaComputeStreamSync(hypre_handle()) = 0; #endif hypre_ParVectorCopy(f, v); hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v); #if defined(HYPRE_USING_CUDA) hypreDevice_IVAXPY(num_rows, l1_norms, v_data, u_data); #else /* #if defined(HYPRE_USING_CUDA) */ HYPRE_Int i; /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */ #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms) #endif for (i = 0; i < num_rows; i++) { u_data[i] += v_data[i] / l1_norms[i]; } #endif /* #if defined(HYPRE_USING_CUDA) */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) hypre_HandleCudaComputeStreamSync(hypre_handle()) = sync_stream; hypre_SyncCudaComputeStream(hypre_handle()); #endif } else 
if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */ { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int i, j; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); HYPRE_Real res; HYPRE_Int num_procs; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); /* Copy off-diagonal values of u to the current processor */ if (num_procs > 1) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_sends; HYPRE_Real *u_buf_data; hypre_ParCSRCommHandle *comm_handle; HYPRE_Int index = 0, start; if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST); } if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */ { /* Forward local pass */ for (i = 0; i < num_rows; i++) { res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; 
j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += res / l1_norms[i]; } /* Backward local pass */ for (i = num_rows-1; i > -1; i--) { res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += res / l1_norms[i]; } } else if (relax_weight == 1.0) /* SSOR */ { /* Forward local pass */ for (i = 0; i < num_rows; i++) { res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += omega * res / l1_norms[i]; } /* Backward local pass */ for (i = num_rows-1; i > -1; i--) { res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += omega * res / l1_norms[i]; } } else /* scaled SSOR */ { HYPRE_Real dif; HYPRE_Real c1 = omega * relax_weight; HYPRE_Real c2 = omega * (1.0 - relax_weight); /* Forward local pass (save initial guess in v_data) */ for (i = 0; i < num_rows; i++) { dif = 0.0; v_data[i] = u_data[i]; res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (A_diag_J[j] < i) dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]); } if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += (c1 * res + c2 * dif) / l1_norms[i]; } /* Backward local pass */ for (i = num_rows-1; i > -1; i--) { dif = 0.0; res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (A_diag_J[j] > i) dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]); } if 
(num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; u_data[i] += (c1 * res + c2 * dif) / l1_norms[i]; } } hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST); } else if (relax_type == 3) /* Kaczmarz */ { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int i, j; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); HYPRE_Real res; HYPRE_Int num_procs; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); /* Copy off-diagonal values of u to the current processor */ if (num_procs > 1) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_sends; HYPRE_Real *u_buf_data; hypre_ParCSRCommHandle *comm_handle; HYPRE_Int index = 0, start; if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST); } /* Forward local pass */ for (i = 0; i < num_rows; i++) { res = f_data[i]; for (j = A_diag_I[i]; j < 
A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; res /= l1_norms[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) u_data[A_diag_J[j]] += omega * res * A_diag_data[j]; } /* Backward local pass */ for (i = num_rows-1; i > -1; i--) { res = f_data[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) res -= A_diag_data[j] * u_data[A_diag_J[j]]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) res -= A_offd_data[j] * u_offd_data[A_offd_J[j]]; res /= l1_norms[i]; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) u_data[A_diag_J[j]] += omega * res * A_diag_data[j]; } hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST); } else /* call BoomerAMG relaxation */ { if (relax_type == 16) { hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order, 1, 0, u, v, z); } else { hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight, omega, l1_norms, u, v, z); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInRangeOf * * Return a vector that belongs to the range of a given matrix. *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A) { hypre_ParVector *x; x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(x); hypre_ParVectorOwnsData(x) = 1; hypre_ParVectorOwnsPartitioning(x) = 0; return x; } /*-------------------------------------------------------------------------- * hypre_ParVectorInDomainOf * * Return a vector that belongs to the domain of a given matrix. 
*--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A) { hypre_ParVector *x; x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixColStarts(A)); hypre_ParVectorInitialize(x); hypre_ParVectorOwnsData(x) = 1; hypre_ParVectorOwnsPartitioning(x) = 0; return x; } /*-------------------------------------------------------------------------- * hypre_ParVectorBlockSplit * * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]]. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x, hypre_ParVector *x_[3], HYPRE_Int dim) { HYPRE_Int i, d, size_; HYPRE_Real *x_data, *x_data_[3]; size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0])); x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); for (d = 0; d < dim; d++) x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d])); for (i = 0; i < size_; i++) for (d = 0; d < dim; d++) x_data_[d][i] = x_data[dim*i+d]; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorBlockGather * * Compose a parallel block vector x from dim given sub-vectors * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]]. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x, hypre_ParVector *x_[3], HYPRE_Int dim) { HYPRE_Int i, d, size_; HYPRE_Real *x_data, *x_data_[3]; size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0])); x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); for (d = 0; d < dim; d++) x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d])); for (i = 0; i < size_; i++) for (d = 0; d < dim; d++) x_data[dim*i+d] = x_data_[d][i]; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGBlockSolve * * Apply the block-diagonal solver diag(B) to the system diag(A) x = b. * Here B is a given BoomerAMG solver for A, while x and b are "block" * parallel vectors. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBlockSolve(void *B, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { HYPRE_Int d, dim = 1; hypre_ParVector *b_[3]; hypre_ParVector *x_[3]; dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A); if (dim == 1) { hypre_BoomerAMGSolve(B, A, b, x); return hypre_error_flag; } for (d = 0; d < dim; d++) { b_[d] = hypre_ParVectorInRangeOf(A); x_[d] = hypre_ParVectorInRangeOf(A); } hypre_ParVectorBlockSplit(b, b_, dim); hypre_ParVectorBlockSplit(x, x_, dim); for (d = 0; d < dim; d++) hypre_BoomerAMGSolve(B, A, b_[d], x_[d]); hypre_ParVectorBlockGather(x, x_, dim); for (d = 0; d < dim; d++) { hypre_ParVectorDestroy(b_[d]); hypre_ParVectorDestroy(x_[d]); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFixZeroRows * * For every zero row in the matrix: set the diagonal element to 1. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real l1_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   /* Local (diagonal) CSR part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   /* Off-processor (off-diagonal) CSR part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      /* Accumulate the l1 norm of row i over both diag and offd parts */
      l1_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         l1_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            l1_norm += fabs(A_offd_data[j]);

      if (l1_norm <= eps)
      {
         /* Zero row: overwrite in place with the identity row —
            1.0 on the diagonal entry, 0.0 on all other stored entries.
            The sparsity pattern of A is not changed. */
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            if (A_diag_J[j] == i)
               A_diag_data[j] = 1.0;
            else
               A_diag_data[j] = 0.0;
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               A_offd_data[j] = 0.0;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 ); if (exec == HYPRE_EXEC_HOST) { HYPRE_Int num_threads = hypre_NumThreads(); if (num_threads > 1) { return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr); } } HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1); HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE; HYPRE_Real *diag_tmp = NULL; HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) { cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data, 
memory_location_tmp, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); if (exec == HYPRE_EXEC_DEVICE) { cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); } else { cf_marker_dev = cf_marker; } } if (option == 1) { /* Set the l1 norm of the diag part */ hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set"); /* Add the l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add"); } } else if (option == 2) { /* Set the abs(diag) element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1); /* Add the l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add"); } } else if (option == 3) { /* Set the CF l2 norm of the diag part */ hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set"); /* Add the CF l2 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add"); } } else if (option == 4) { /* Set the abs(diag) element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1); diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp); hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1); /* Add the scaled l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add"); } /* Truncate according to Remark 6.2 */ #if defined(HYPRE_USING_CUDA) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() ); } else #endif { for (i = 0; i < num_rows; i++) { if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i]) { l1_norm[i] = diag_tmp[i]; } } } } else if 
(option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */ { /* Set the diag element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0); #if defined(HYPRE_USING_CUDA) if ( exec == HYPRE_EXEC_DEVICE) { thrust::identity<HYPRE_Complex> identity; HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 ); } else #endif { for (i = 0; i < num_rows; i++) { if (l1_norm[i] == 0.0) { l1_norm[i] = 1.0; } } } *l1_norm_ptr = l1_norm; return hypre_error_flag; } /* Handle negative definite matrices */ if (!diag_tmp) { diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp); } /* Set the diag element */ hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0); #if defined(HYPRE_USING_CUDA) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(), is_negative<HYPRE_Real>() ); //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) ); bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() ); if ( any_zero ) { hypre_error_in_arg(1); } } else #endif { for (i = 0; i < num_rows; i++) { if (diag_tmp[i] < 0.0) { l1_norm[i] = -l1_norm[i]; } } for (i = 0; i < num_rows; i++) { /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } } if (exec == HYPRE_EXEC_DEVICE) { hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE); } hypre_TFree(cf_marker_offd, memory_location_tmp); hypre_TFree(diag_tmp, memory_location_tmp); *l1_norm_ptr = l1_norm; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDiagRows * * For every row containing only a diagonal element: set it to d. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (i = 0; i < num_rows; i++)
   {
      j = A_diag_I[i];
      /* A row qualifies when its diag part has exactly one entry, that entry
         sits on the diagonal, and its offd part is empty. */
      if ((A_diag_I[i+1] == j+1) &&
          (A_diag_J[j] == i) &&
          (!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i])))
      {
         A_diag_data[j] = d;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure.
 *--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;                   /* 3D problem */
   ams_data -> maxit = 20;                /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;                /* convergence tolerance */
   ams_data -> print_level = 1;           /* print residual norm at each step */
   ams_data -> cycle_type = 1;            /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;          /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;         /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;      /* damping parameter */
   ams_data -> A_omega = 1.0;             /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;         /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;     /* Cheby: fraction of spectrum to smooth */

   ams_data -> B_G_coarsen_type = 10;     /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;        /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;        /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;          /* strength threshold */
   ams_data -> B_G_interp_type = 0;       /* interpolation type */
   ams_data -> B_G_Pmax = 0;              /* max nonzero elements in interp. rows */
   ams_data -> B_Pi_coarsen_type = 10;    /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;       /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;       /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;         /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;      /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;             /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;          /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;
   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;
   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;
   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   ams_data -> owns_Pi   = 1;   /* AMS owns Pi/Pix/Piy/Piz unless SetInterpolations is called */
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure.
Note that the input data (given
 * through the Set functions) is not destroyed.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Only objects owned by AMS (per the owns_* flags) are destroyed. */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* NOTE(review): destroying ams_data->A here (guarded by G0) presumably
      relies on hypre_AMSSetup replacing A with an AMS-owned, G0-modified
      matrix whenever interior_nodes/G0 are set — confirm against
      hypre_AMSSetup before touching this; if A were still the user's input
      matrix this would free caller-owned data. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   /* NOTE: ams_data is known non-NULL here (checked above); the guard is
      redundant but harmless. */
   if (ams_data)
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* NOTE(review): an invalid dim flags an error but is still stored —
      confirm whether an early return was intended. */
   if (dim != 2 && dim != 3)
      hypre_error_in_arg(2);

   ams_data -> dim = dim;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> G = G;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Pointers are stored, not copied; the caller retains ownership. */
   ams_data -> x = x;
   ams_data -> y = y;
   ams_data -> z = z;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Pointers are stored, not copied; the caller retains ownership. */
   ams_data -> Gx = Gx;
   ams_data -> Gy = Gy;
   ams_data -> Gz = Gz;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components of
 * the first node (vertex or high-order dof) should be listed first, followed by
 * the x/y/z components of the second node and so on (see the documentation of
 * HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both).  If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> Pi = Pi;
   ams_data -> Pix = Pix;
   ams_data -> Piy = Piy;
   ams_data -> Piz = Piz;
   /* User-supplied interpolations remain user-owned: AMSDestroy will not
      free them (see the owns_Pi checks there). */
   ams_data -> owns_Pi = 0;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom.
      NOTE: this modifies the caller's matrix in place (identity-like rows
      get their diagonal set to HYPRE_REAL_MAX). */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_G = A_G;
   if (!A_G)
      /* NULL matrix means beta == 0: switch to two-level cycles */
      ams_data -> beta_is_zero = 1;
   else
   {
      /* Penalize the eliminated degrees of freedom.
         NOTE: modifies the caller's matrix in place. */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Set the list of nodes which are interior to the zero-conductivity region.
 * A node is interior if interior_nodes[i] == 1.0.
 *
 * Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. 
Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_relax_type = A_relax_type;
   ams_data -> A_relax_times = A_relax_times;
   ams_data -> A_relax_weight = A_relax_weight;
   ams_data -> A_omega = A_omega;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetChebySmoothingOptions
 * AB: note: this could be added to the above,
 *     but I didn't want to change parameter list)
 * Set parameters for chebyshev smoother for A. Default values: 2,.3.
 *--------------------------------------------------------------------------*/

/* NOTE(review): A_cheby_fraction is declared HYPRE_Int here, yet the
   documented default is the fractional value .3 (set as HYPRE_Real in
   hypre_AMSCreate) — an integer parameter truncates any fractional input.
   Changing the type would alter the public signature, so it is only
   flagged here; confirm against the HYPRE_AMSSetChebySmoothingOptions
   declaration in the public headers. */
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
                                            HYPRE_Int A_cheby_order,
                                            HYPRE_Int A_cheby_fraction)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_cheby_order =  A_cheby_order;
   ams_data -> A_cheby_fraction =  A_cheby_fraction;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGOptions
 *
 * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   /* Record the BoomerAMG options used to build B_Pi in hypre_AMSSetup(). */
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams->B_Pi_agg_levels   = B_Pi_agg_levels;
   ams->B_Pi_relax_type   = B_Pi_relax_type;
   ams->B_Pi_theta        = B_Pi_theta;
   ams->B_Pi_interp_type  = B_Pi_interp_type;
   ams->B_Pi_Pmax         = B_Pi_Pmax;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   /* Record the BoomerAMG options used to build B_G in hypre_AMSSetup(). */
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_G_coarsen_type = B_G_coarsen_type;
   ams->B_G_agg_levels   = B_G_agg_levels;
   ams->B_G_relax_type   = B_G_relax_type;
   ams->B_G_theta        = B_G_theta;
   ams->B_G_interp_type  = B_G_interp_type;
   ams->B_G_Pmax         = B_G_Pmax;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      /* Pi has one block column per spatial dimension, hence dim times the
         columns/nonzeros of G, with the same row structure. */
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* Pi owns col_starts (OwnsColStarts = 1 below); it is freed when Pi
         is destroyed. */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz is only dereferenced in 3D; Gz_data stays uninitialized (and
         unused) when dim == 2. */
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);

         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);

         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* Each nonzero of G expands into dim consecutive nonzeros of Pi
            (node-based column numbering: x/y/z of node 0, then node 1, ...). */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Entry values: |G_ij| * 0.5 * (component of the constant field at
            vertex i), written in x,y[,z] order per nonzero. */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);

         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* Row pointers are only filled when the offd part is non-empty. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim ==
3) *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d; } } *Pi_ptr = Pi; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSComputePixyz * * Construct the components Pix, Piy, Piz of the interpolation matrix Pi, * which maps the space of vector linear finite elements to the space of * edge finite elements. * * The construction is based on the fact that each component has the same * sparsity structure as G, and the entries can be computed from the vectors * Gx, Gy, Gz. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **Pix_ptr, hypre_ParCSRMatrix **Piy_ptr, hypre_ParCSRMatrix **Piz_ptr) { hypre_ParCSRMatrix *Pix, *Piy, *Piz; /* Compute Pix, Piy, Piz */ { HYPRE_Int i, j; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); Pix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(Pix) = 1; hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(Pix) = 0; hypre_ParCSRMatrixInitialize(Pix); Piy = 
hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(Piy) = 1; hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(Piy) = 0; hypre_ParCSRMatrixInitialize(Piy); if (dim == 3) { Piz = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(Piz) = 1; hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(Piz) = 0; hypre_ParCSRMatrixInitialize(Piz); } Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); if (dim == 3) Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); /* Fill-in the diagonal part */ if (dim == 3) { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix); HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag); HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag); HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag); hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy); HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag); HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag); HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag); hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz); HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag); HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag); HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag); for (i = 0; i < G_diag_nrows+1; i++) { Pix_diag_I[i] = G_diag_I[i]; Piy_diag_I[i] = G_diag_I[i]; Piz_diag_I[i] = G_diag_I[i]; } for (i = 0; i < G_diag_nnz; 
i++) { Pix_diag_J[i] = G_diag_J[i]; Piy_diag_J[i] = G_diag_J[i]; Piz_diag_J[i] = G_diag_J[i]; } for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } else { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix); HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag); HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag); HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag); hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy); HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag); HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag); HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag); for (i = 0; i < G_diag_nrows+1; i++) { Pix_diag_I[i] = G_diag_I[i]; Piy_diag_I[i] = G_diag_I[i]; } for (i = 0; i < G_diag_nnz; i++) { Pix_diag_J[i] = G_diag_J[i]; Piy_diag_J[i] = G_diag_J[i]; } for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; } } /* Fill-in the off-diagonal part */ if (dim == 3) { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *Pix_offd = 
hypre_ParCSRMatrixOffd(Pix); HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd); HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd); HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd); hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy); HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd); HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd); HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd); hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz); HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd); HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd); HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix); HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy); HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz); if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) { Pix_offd_I[i] = G_offd_I[i]; Piy_offd_I[i] = G_offd_I[i]; Piz_offd_I[i] = G_offd_I[i]; } for (i = 0; i < G_offd_nnz; i++) { Pix_offd_J[i] = G_offd_J[i]; Piy_offd_J[i] = G_offd_J[i]; Piz_offd_J[i] = G_offd_J[i]; } for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } for (i = 0; i < G_offd_ncols; i++) { Pix_cmap[i] = G_cmap[i]; Piy_cmap[i] = G_cmap[i]; Piz_cmap[i] = G_cmap[i]; } } else { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix); HYPRE_Int *Pix_offd_I = 
hypre_CSRMatrixI(Pix_offd); HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd); HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd); hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy); HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd); HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd); HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix); HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy); if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) { Pix_offd_I[i] = G_offd_I[i]; Piy_offd_I[i] = G_offd_I[i]; } for (i = 0; i < G_offd_nnz; i++) { Pix_offd_J[i] = G_offd_J[i]; Piy_offd_J[i] = G_offd_J[i]; } for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; } for (i = 0; i < G_offd_ncols; i++) { Pix_cmap[i] = G_cmap[i]; Piy_cmap[i] = G_cmap[i]; } } } *Pix_ptr = Pix; *Piy_ptr = Piy; if (dim == 3) *Piz_ptr = Piz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSComputeGPi * * Construct the matrix [G,Pi] which can be considered an interpolation * matrix from S_h^4 (4 copies of the scalar linear finite element space) * to the edge finite elements space. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **GPi_ptr) { hypre_ParCSRMatrix *GPi; /* Take into account G */ dim++; /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */ { HYPRE_Int i, j, d; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_BigInt *col_starts; HYPRE_Int col_starts_size; HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G); #ifdef HYPRE_NO_GLOBAL_PARTITION col_starts_size = 2; #else HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); col_starts_size = num_procs+1; #endif col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST); for (i = 0; i < col_starts_size; i++) col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i]; GPi = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(GPi) = 1; hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0; hypre_ParCSRMatrixOwnsColStarts(GPi) = 1; hypre_ParCSRMatrixInitialize(GPi); Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); if (dim == 4) Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); /* Fill-in the diagonal part */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); 
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi); HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag); HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag); HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag); for (i = 0; i < G_diag_nrows+1; i++) GPi_diag_I[i] = dim * G_diag_I[i]; for (i = 0; i < G_diag_nnz; i++) for (d = 0; d < dim; d++) GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d; for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *GPi_diag_data++ = G_diag_data[j]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } /* Fill-in the off-diagonal part */ { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi); HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd); HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd); HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi); if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) GPi_offd_I[i] = dim * G_offd_I[i]; for (i = 0; i < G_offd_nnz; i++) for (d = 0; d < dim; d++) GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d; for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *GPi_offd_data++ = G_offd_data[j]; 
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) GPi_cmap[dim*i+d] = dim*G_cmap[i]+d; } } *GPi_ptr = GPi; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetup * * Construct the AMS solver components. * * The following functions need to be called before hypre_AMSSetup(): * - hypre_AMSSetDimension() (if solving a 2D problem) * - hypre_AMSSetDiscreteGradient() * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int input_info = 0; ams_data -> A = A; /* Modifications for problems with zero-conductivity regions */ if (ams_data -> interior_nodes) { hypre_ParCSRMatrix *G0t, *Aorig = A; /* Make sure that multiple Setup()+Solve() give identical results */ ams_data -> solve_counter = 0; /* Construct the discrete gradient matrix for the zero-conductivity region by eliminating the zero-conductivity nodes from G^t. The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. 
*/ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data=hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i+1]; j++) G0tdA[j] = 0.0; if (G0toI) for (j = G0toI[i]; j < G0toI[i+1]; j++) G0toA[j] = 0.0; } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, 
(HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t); hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; HYPRE_Real factor, lfactor; /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i; HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B)); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B)); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B)); lfactor = -1; for (i = 0; i < B_diag_i[B_num_rows]; i++) if (fabs(B_diag_data[i]) > lfactor) lfactor = fabs(B_diag_data[i]); for (i = 0; i < B_offd_i[B_num_rows]; i++) if (fabs(B_offd_data[i]) > lfactor) lfactor = fabs(B_offd_data[i]); lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); } hypre_ParcsrAdd(factor, A, 1.0, B, &C); /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); 
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B);*/ /* scale (penalize) G0 G0^T before adding it to the matrix */ /*{ HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 1; hypre_ParCSRMatrixOwnsColStarts(G0t) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); */ hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. 
*/ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */ /* Compute the l1 norm of the rows of A */ if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4) { HYPRE_Real *l1_norm_data = NULL; hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data); ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A)); hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data; hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A)); } /* Chebyshev? */ if (ams_data -> A_relax_type == 16) { hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10, &ams_data->A_max_eig_est, &ams_data->A_min_eig_est); } /* If not given, compute Gx, Gy and Gz */ { if (ams_data -> x != NULL && ams_data -> y != NULL && (ams_data -> dim == 2 || ams_data -> z != NULL)) input_info = 1; if (ams_data -> Gx != NULL && ams_data -> Gy != NULL && (ams_data -> dim == 2 || ams_data -> Gz != NULL)) input_info = 2; if (input_info == 1) { ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx); ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy); if (ams_data -> dim == 3) { ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz); } } } if (ams_data -> Pi == NULL && ams_data -> Pix == NULL) { if (ams_data -> cycle_type == 20) /* Construct the combined interpolation matrix [G,Pi] */ hypre_AMSComputeGPi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); else if (ams_data -> cycle_type > 10) /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */ hypre_AMSComputePixyz(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, 
&ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). */ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { HYPRE_Int G_owned_col_starts; if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) hypre_MatvecCommPkgCreate(ams_data -> G); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); 
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G); hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); /* Make sure that A_G has no zero rows (this can happen if beta is zero in part of the domain). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts; hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0; ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, 0, 0); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_Int P_owned_col_starts; HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, 
ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) hypre_MatvecCommPkgCreate(ams_data -> Pix); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) 
= 0; } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, 0, 0); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) hypre_MatvecCommPkgCreate(ams_data -> Piy); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0; } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, 0, 0); if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) hypre_MatvecCommPkgCreate(ams_data -> Piz); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0; } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, 0, 0); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. 
*/ if (!ams_data -> A_Pi) { HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) hypre_MatvecCommPkgCreate(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); hypre_ParCSRMatrixOwnsColStarts(Gt) = 0; hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0; /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); for (i = 0; i < Gt_num_rows; i++) { /* determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) Gt_diag_data[j] *= h2; for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++) Gt_offd_data[j] *= h2; } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } GGt = 
hypre_ParMatmul(ams_data -> G, Gt); hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt); /*{ hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixBigAdd(A_local, B_local); hypre_CSRMatrixBigJtoJ(C_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; }*/ hypre_ParCSRMatrixDestroy(GGt); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } else { 
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0; } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); else HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi); HYPRE_BoomerAMGSetup(ams_data -> B_Pi, (HYPRE_ParCSRMatrix)ams_data -> A_Pi, 0, 0); } /* Allocate temporary vectors */ ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A); ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A); if (ams_data -> A_G) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G); } if (ams_data -> r1 == NULL && ams_data -> A_Pix) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); } if (ams_data -> Pi) { ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi); ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSolve * * Solve the system A x = b. 
 *--------------------------------------------------------------------------*/

/* Apply up to maxit iterations of the AMS preconditioner to A x = b,
   optionally printing residual norms (print_level > 0, rank 0 only).
   Sets ams_data->num_iterations and ams_data->rel_resid_norm on exit;
   flags HYPRE_ERROR_CONV if the iteration did not reach tol. */
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;

   /* cycle is a character string interpreted by hypre_ParCSRSubspacePrec:
      '0' = fine-grid smoothing, '1'..'5' = subspace corrections,
      '(' ')' '+' control additive combinations. */
   char cycle[30];
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;

   /* Subspace data indexed as in the cycle string:
      0 = G (gradient), 1 = Pi (nodal), 2..4 = Pix/Piy/Piz components. */
   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* Only the Pi subspace uses the block (systems) AMG solve. */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* Work vectors: r1/g1 are shared by the G and Pix/Piy/Piz subspaces,
      r2/g2 belong to the (larger) Pi subspace. */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity
      regions. Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Select the cycle string from cycle_type; the beta == 0 case skips
      the G-subspace ('1') corrections. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms (only when iterating; a single
         application with maxit == 1 skips all norm computations) */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n",
                         r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle, z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   if (z)
      hypre_ParVectorDestroy(z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRSubspacePrec
 *
 * General subspace preconditioner for A0 y = x, based on ParCSR storage.
 *
 * P[i] and A[i] are the interpolation and coarse grid matrices for
 * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
 * are temporary vectors. A0_* are the fine grid smoothing parameters.
 *
 * The default mode is multiplicative, '+' changes the next correction
 * to additive, based on residual computed at '('.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   /* Interpret the cycle string one character at a time; each character
      either controls the mode or applies one smoothing/correction step. */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction: the NEXT subspace correction will
         use the residual saved at the last '(' instead of recomputing */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         /* '1'..'5' map to subspace index 0..4 */
         HYPRE_Int i = *op - '1';
         if (i < 0)
            hypre_error_in_arg(16);

         /* skip empty subspaces */
         if (!A[i])
            continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* one AMG V-cycle on the subspace problem, zero initial guess */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
Without it
 * the values in the zero-conductivity regions contain kernel components.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Nothing to project unless the interior-gradient AMG solver exists. */
   if (!(ams_data -> B_G0))
   {
      return hypre_error_flag;
   }

   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);

   /* g1 ~= (G0^t G0)^{-1} r1, via AMG with a zero initial guess */
   hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
   hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);

   /* x -= G0 g1 */
   hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
   hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSConstructDiscreteGradient
 *
 * Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the egdes (e.g. the stiffness matrix A)
 * - a vector on the vertices (e.g. the x coordinates)
 * - the array edge_vertex, which lists the global indexes of the
 *   vertices of the local edges.
 *
 * We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specificaly:
 * If edge_orientation = 1, the edges are already oriented.
 * If edge_orientation = 2, the orientation of edge i depends only on the
 * sign of edge_vertex[2*i+1] - edge_vertex[2*i].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord. Each row of G has
      exactly two entries (+1/-1 on the edge's two vertices). */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
      HYPRE_Int part_size;
      HYPRE_BigInt *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);

      /* Fixed row pointer: two nonzeros per edge */
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
      {
         /* invalid edge_orientation argument */
         hypre_error_in_arg(4);
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts,
                                   col_starts,
                                   0, 0, 0);
      /* G owns the freshly copied partitionings above */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEISetup
 *
 * Construct an AMS solver object based on the following data:
 *
 * A - the edge element stiffness matrix
 * num_vert - number of vertices (nodes) in the processor
 * num_local_vert - number of vertices owned by the processor
 * vert_number - global indexes of the vertices in the processor
 * vert_coord - coordinates of the vertices in the processor
 * num_edges - number of edges owned by the processor
 * edge_vertex - the vertices of the edges owned by the processor.
 *               Vertices are in local numbering (the same as in
 *               vert_number), and edge orientation is always from
 *               the first to the second vertex.
 *
 * Here we distinguish between vertices that belong to elements in the
 * current processor, and the subset of these vertices that is owned by
 * the processor.
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt *vert_part, num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;

   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* 2-entry partition: [first owned vertex, one past last owned vertex) */
   vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
#else
   /* full (num_procs+1)-entry partition via an allgather + prefix sum */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT, &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif

   /* Construct hypre parallel vectors for the vertex coordinates.
      All three share vert_part, so none of them owns it. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global
      (NOTE: modifies the caller's edge_vertex array in place) */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      /* two nonzeros per edge row */
      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* row starts belong to A; G takes ownership of vert_part */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;

      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      //hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSDestroy().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (ams_data -> G)
      hypre_ParCSRMatrixDestroy(ams_data -> G);

   if (ams_data -> x)
      hypre_ParVectorDestroy(ams_data -> x);
   if (ams_data -> y)
      hypre_ParVectorDestroy(ams_data -> y);
   if (ams_data -> z)
      hypre_ParVectorDestroy(ams_data -> z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms Threads
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   /* output array; freed by the caller */
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* Each k iteration handles a contiguous row block [ns, ne) so that the
      option 2/4 "off-thread" tests below see the same block decomposition
      that the threaded Gauss-Seidel smoother uses. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* squared l2 norm of each row (cf_marker is ignored here) */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            /* NOTE(review): 'diag' is only assigned when a diagonal entry
               (ii == i) is encountered in row i; for a row with no stored
               diagonal (or a C/F-filtered one) this reads an indeterminate
               value carried over from a previous row — confirm rows always
               store their diagonal before relying on option 4. */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) l1_norm[i] = 1.0;
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
               l1_norm[i] = -l1_norm[i];

         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               /* a zero row scaling would divide by zero in the smoother */
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRRelaxThreads
 * 1 = l1-scaled Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 *--------------------------------------------------------------------------*/

/* NOTE(review): relax_times is accepted but not referenced in this routine;
   a single sweep is performed per call — confirm callers loop externally. */
HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
                                   hypre_ParVector *f,
                                   HYPRE_Int relax_type,
                                   HYPRE_Int relax_times,
                                   HYPRE_Real *l1_norms,
                                   HYPRE_Real relax_weight,
                                   HYPRE_Real omega,
                                   hypre_ParVector *u,
                                   hypre_ParVector *Vtemp,
                                   hypre_ParVector *z)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data;     /* halo values of u from neighbor processes */
   HYPRE_Real *v_buf_data;    /* send buffer for the halo exchange */
   HYPRE_Real *tmp_data;      /* snapshot of u for off-block couplings */
   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, num_threads, my_id;
   HYPRE_Real zero = 0.0;
   HYPRE_Real res, res2;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /* only allow jacobi and GS */
   if (relax_type > 2)
      relax_type = 2;

   /*-----------------------------------------------------------------
    * Exchange the boundary (off-processor) values of u.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
                                                 Vext_data);

      /*-----------------------------------------------------------------
       * Wait for the halo exchange to complete before relaxing.
       *-----------------------------------------------------------------*/
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   if (relax_type == 1) /* Jacobi */
   {
      /* snapshot u so all updates use old values */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         Vtemp_data[i] = u_data[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   else if (relax_type == 2) /* GS */
   {
      if (relax_weight == 1 && omega == 1)
      {
         /* hybrid symmetric Gauss-Seidel: GS within each thread's block,
            Jacobi (via the tmp_data snapshot) across block boundaries */
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* weighted SSOR variant: c1/c2 blend the new residual with the
            correction from already-updated neighbors (res2) */
         HYPRE_Real c1 = omega*relax_weight;
         HYPRE_Real c2 = omega*(1.0-relax_weight);
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
         {
            tmp_data[i] = u_data[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  Vtemp_data[i] = u_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii < i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii > i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
   } /* end of Jacobi or G.S. */

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }

   return(relax_error);
}
zgetrs.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 * Solves A*X = B for X, where A is an n-by-n general matrix previously
 * factored as P*L*U (partial pivoting): the row interchanges in ipiv are
 * applied to B, then L*Y = B and U*X = Y are solved in sequence
 * (see plasma_omp_zgetrs below). X overwrites B.
 *
 * @param[in]     n     order of A (n >= 0)
 * @param[in]     nrhs  number of right-hand sides (nrhs >= 0)
 * @param[in]     pA    LU factors of A, LAPACK layout, lda-by-n
 * @param[in]     lda   leading dimension of pA (lda >= max(1,n))
 * @param[in]     ipiv  pivot indices from the factorization
 * @param[in,out] pB    right-hand sides on entry, solution on exit
 * @param[in]     ldb   leading dimension of pB (ldb >= max(1,n))
 *
 * @retval PlasmaSuccess on success, negative argument index or a PLASMA
 *         error code otherwise.
 ******************************************************************************/
int plasma_zgetrs(int n, int nrhs,
                  plasma_complex64_t *pA, int lda, int *ipiv,
                  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (return value is -(argument index)).
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // FIX: A was created above and must not leak on this error path.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // FIX: both descriptors were created above and must not leak.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // One parallel region for translation, compute, and back-translation:
    // the async calls pipeline through the sequence, so separate regions
    // (and their implicit barriers) are unnecessary.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call the tile async function.
        plasma_omp_zgetrs(A, ipiv, B, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 * Asynchronous tile version: applies the row swaps in ipiv to B, then
 * solves with the unit lower-triangular and the upper-triangular factors
 * stored in A. Errors are reported through the sequence/request pair.
 ******************************************************************************/
void plasma_omp_zgetrs(plasma_desc_t A, int *ipiv, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions: P, then L (unit diagonal), then U.
    plasma_pzgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);

    plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pztrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
region_3.tfm.c
/*
 * baz: for each I in [0, N), sets A[I] = I + sum(T[0..M-1]).
 * The outer loop is parallelized with OpenMP.
 *
 * BUG FIX: "default(shared)" is a clause of the "parallel" directive and
 * is not permitted on "#pragma omp for"; it has been moved onto the
 * enclosing parallel construct.
 */
void baz(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (int I = 0; I < N; ++I) {
      A[I] = I;
      for (int J = 0; J < M; ++J)
        A[I] = A[I] + T[J];
    }
  }
}

/* bar: thin forwarding wrapper around baz (kept for the region analysis). */
void bar(int M, int *restrict T, int N, int *restrict A) { baz(M, T, N, A); }

/*
 * foo: fills a fixed table T = {0,1,2,3} and applies bar to A[0..N-1],
 * so A[I] becomes I + 6. The spf pragma marks the analyzed region.
 */
void foo(int N, int *A) {
  int TSize = 4;
  int T[4];
  for (int I = 0; I < TSize; ++I)
    T[I] = I;
#pragma spf region
  {
    bar(TSize, T, N, A);
  }
}
fci_spin.c
/* * 2-particle spin density matrix * Gamma(ia,jb,kb,la) or Gamma(ib,ja,ka,lb) */ #include <stdlib.h> #include <string.h> #include "config.h" #include "vhf/fblas.h" #define CSUMTHR 1e-28 #define EXTRACT_CRE(tab, i) (tab[i*4+0]) #define EXTRACT_DES(tab, i) (tab[i*4+1]) #define EXTRACT_ADDR(tab, i) (tab[i*4+2]) #define EXTRACT_SIGN(tab, i) (tab[i*4+3]) /* * the intermediate determinants ~ (norb,neleca+1;norb,nelecb-1) * Annihilating one alpha electron and creating one beta electron lead * to the input ground state CI |0> * stra_id is the ID of the intermediate determinants. t1 is a buffer * of size [nstrb_or_fillcnt,norb*norb]. fillcnt is the dim of beta * strings for intermediate determinants */ static double ades_bcre_t1(double *ci0, double *t1, int fillcnt, int stra_id, int norb, int nstrb, int neleca, int nelecb, int *ades_index, int *bcre_index) { const int nnorb = norb * norb; const int inelec = neleca + 1; const int invir = norb - nelecb + 1; int ic, id, i, j, k, str1, sign, signa; const int *tab; double *pt1, *pci; double csum = 0; ades_index = ades_index + stra_id * inelec * 4; for (id = 0; id < inelec; id++) { j = EXTRACT_DES (ades_index, id); str1 = EXTRACT_ADDR(ades_index, id); signa = EXTRACT_SIGN(ades_index, id); pci = ci0 + str1 * (size_t)nstrb; pt1 = t1 + j*norb; for (k = 0; k < fillcnt; k++) { tab = bcre_index + k * invir * 4; for (ic = 0; ic < invir; ic++) { i = EXTRACT_CRE (tab, ic); str1 = EXTRACT_ADDR(tab, ic); sign = EXTRACT_SIGN(tab, ic) * signa; pt1[i] += pci[str1] * sign; csum += pci[str1] * pci[str1]; } pt1 += nnorb; } } return csum; } /* * the intermediate determinants ~ (norb,neleca-1;norb,nelecb+1) * Annihilating one beta electron and creating one alpha electron lead * to the input ground state CI |0> * stra_id is the ID of the intermediate determinants. t1 is a buffer * of size [nstrb_or_fillcnt,norb*norb]. 
fillcnt is the dim of beta * strings for intermediate determinants */ static double acre_bdes_t1(double *ci0, double *t1, int fillcnt, int stra_id, int norb, int nstrb, int neleca, int nelecb, int *acre_index, int *bdes_index) { const int nnorb = norb * norb; const int inelec = nelecb + 1; const int invir = norb - neleca + 1; int ic, id, i, j, str0, str1, sign, signa; const int *tab; double *pci, *pt1; double csum = 0; acre_index = acre_index + stra_id * invir * 4; for (ic = 0; ic < invir; ic++) { i = EXTRACT_CRE (acre_index, ic); str1 = EXTRACT_ADDR(acre_index, ic); signa = EXTRACT_SIGN(acre_index, ic); pci = ci0 + str1 * (size_t)nstrb; pt1 = t1 + i; tab = bdes_index; for (str0 = 0; str0 < fillcnt; str0++) { for (id = 0; id < inelec; id++) { j = EXTRACT_DES (tab, id); str1 = EXTRACT_ADDR(tab, id); sign = EXTRACT_SIGN(tab, id) * signa; pt1[j*norb] += sign * pci[str1]; csum += pci[str1] * pci[str1]; } tab += inelec * 4; pt1 += nnorb; } } return csum; } static void _transpose_jikl(double *dm2, int norb) { int nnorb = norb * norb; int i, j; double *p0, *p1; double *tmp = malloc(sizeof(double)*nnorb*nnorb); memcpy(tmp, dm2, sizeof(double)*nnorb*nnorb); for (i = 0; i < norb; i++) { for (j = 0; j < norb; j++) { p0 = tmp + (j*norb+i) * nnorb; p1 = dm2 + (i*norb+j) * nnorb; memcpy(p1, p0, sizeof(double)*nnorb); } } free(tmp); } /* * If symm != 0, symmetrize rdm1 and rdm2 * For spin density matrix, return rdm2 e.g. 
 * [beta alpha beta^+ alpha]
 * transpose(1,0,2,3) to get the right order [alpha^+ beta beta^+ alpha]
 * na, nb, nlinka, nlinkb label the intermediate determinants
 * see ades_bcre_t1 and acre_bdes_t1 of fci_spin.c
 *
 * Note: na counts the alpha strings of intermediate determinants
 * but nb counts the beta strings of ket
 */
void FCIspindm12_drv(void (*dm12kernel)(),
                     double *rdm1, double *rdm2, double *bra, double *ket,
                     int norb, int na, int nb, int neleca, int nelecb,
                     int *link_indexa, int *link_indexb, int symm)
{
        const int nnorb = norb * norb;
        int strk, i, j;
        /* accumulators start from zero; the kernel adds into them */
        memset(rdm1, 0, sizeof(double) * nnorb);
        memset(rdm2, 0, sizeof(double) * nnorb*nnorb);
#pragma omp parallel default(none) \
        shared(dm12kernel, bra, ket, norb, na, nb, neleca, nelecb, \
               link_indexa, link_indexb, rdm1, rdm2), \
        private(strk, i)
{
        /* per-thread partial density matrices, merged under the critical
         * section below so rdm1/rdm2 are updated race-free */
        double *pdm1 = calloc(nnorb, sizeof(double));
        double *pdm2 = calloc(nnorb*nnorb, sizeof(double));
#pragma omp for schedule(static, 40)
        for (strk = 0; strk < na; strk++) {
                (*dm12kernel)(pdm1, pdm2, bra, ket, strk,
                              norb, na, nb, neleca, nelecb,
                              link_indexa, link_indexb);
        }
#pragma omp critical
{
        for (i = 0; i < nnorb; i++) {
                rdm1[i] += pdm1[i];
        }
        for (i = 0; i < nnorb*nnorb; i++) {
                rdm2[i] += pdm2[i];
        }
}
        free(pdm1);
        free(pdm2);
}
        if (symm) {
                /* mirror the lower triangle into the upper triangle of
                 * rdm1 (norb x norb) and rdm2 (nnorb x nnorb) */
                for (i = 0; i < norb; i++) {
                        for (j = 0; j < i; j++) {
                                rdm1[j*norb+i] = rdm1[i*norb+j];
                        }
                }
                for (i = 0; i < nnorb; i++) {
                        for (j = 0; j < i; j++) {
                                rdm2[j*nnorb+i] = rdm2[i*nnorb+j];
                        }
                }
        }
        /* reorder the first two orbital indices: transpose(1,0,2,3) */
        _transpose_jikl(rdm2, norb);
}

/*
 * dm(pq,rs) [p(alpha)^+ q(beta) r(beta)^+ s(alpha)]
 */
void FCIdm2_abba_kern(double *rdm1, double *rdm2, double *bra, double *ket,
                      int stra_id, int norb, int na, int nb,
                      int neleca, int nelecb,
                      int *acre_index, int *bdes_index)
{
        const char UP = 'U';
        const char TRANS_N = 'N';
        const double D1 = 1;
        const int nnorb = norb * norb;
        /* number of beta strings of the intermediate determinants */
        const int instrb = nb * (norb-nelecb) / (nelecb+1);
        double csum;
        double *buf = calloc(nnorb * instrb, sizeof(double));
        csum = acre_bdes_t1(ket, buf, instrb, stra_id, norb, nb,
                            neleca, nelecb,
                            acre_index, bdes_index);
        /* skip the rank-k update when the visited CI weight is negligible */
        if (csum > CSUMTHR) {
                dsyrk_(&UP, &TRANS_N, &nnorb, &instrb,
                       &D1, buf, &nnorb, &D1, rdm2, &nnorb);
        }
        free(buf);
}

/*
 * dm(pq,rs) [p(beta)^+ q(alpha) r(alpha)^+ s(beta)]
 */
void FCIdm2_baab_kern(double *rdm1, double *rdm2, double *bra, double *ket,
                      int stra_id, int norb, int na, int nb,
                      int neleca, int nelecb,
                      int *ades_index, int *bcre_index)
{
        const char UP = 'U';
        const char TRANS_N = 'N';
        const double D1 = 1;
        const int nnorb = norb * norb;
        /* number of beta strings of the intermediate determinants */
        const int instrb = nb * nelecb / (norb-nelecb+1);
        double csum;
        /* buf holds t1[instrb, norb*norb]; dm2 += buf^T buf via dsyrk */
        double *buf = calloc(nnorb * instrb, sizeof(double));
        csum = ades_bcre_t1(ket, buf, instrb, stra_id, norb, nb,
                            neleca, nelecb, ades_index, bcre_index);
        /* skip the rank-k update when the visited CI weight is negligible */
        if (csum > CSUMTHR) {
                dsyrk_(&UP, &TRANS_N, &nnorb, &instrb,
                       &D1, buf, &nnorb, &D1, rdm2, &nnorb);
        }
        free(buf);
}
RTDP.h
/* (c) Yaroslav Salii & the Parliament of Owls, 2017+ License:sort of CC---if you go commercial, contact me RTDP v.1.0 2017-01-30; RTDP v.1.1 2018-02-14; adding tie-breaking code (less bit mask is better &c) to cacheRtBF RTDP v.1.2 2019-02-06; added states' counter to DP code & its logging (only exact DP, not hRtDP) RTDP v.1.2.1 2019-02-07; added states' total counter (a field in t_DP; not used in hRtDP), removed OMP pragmas restricted & exact dynamic programming solutions */ #ifndef RTDP_H_ #define RTDP_H_ #include<iostream> #include<fstream> #include<queue> #include<list> #include <omp.h> #include <thread> #include"dp-base.h" #include"dp-recovery.h" #include"instance.h" #include"log-aux.h" #include"reader-base.h" //========NAMECALLING=&=AUXILIARIES==============================/ /* all fields separated with "-" methodDirn: method direction (BWD:backwards/filters or FWD:forwards/ideals) methodCode: name of method (DP for exact DP; hRtDP for heuristic restricted DP) methodNote: method-specific notes; hRtDP specifies its breadth, for instance */ inline std::string mkSlnCode (const std::string methDirn , const std::string methCode , const std::string methNote) { std::string sep = "-"; return isperse(t_lines {methDirn, methCode, methNote},sep); } inline std::string mk_hRtDP_Code(const t_Direction DIR, const uint32_t ibreadth) { return mkSlnCode(getDirectionCode(DIR), "hRtDP", std::to_string(ibreadth)); } inline std::string mk_DP_Code(const t_Direction DIR) { return mkSlnCode(getDirectionCode(DIR), "DP", ""); } //--------------------------------------------------------------/ //========SOLUTION===OBJECT==========================/ /* FILE NAMING CONVENTION: log: slnName.log dump: slnName.dump (dump, more compact, not too pretty output) */ using t_DP = struct t_DP { const t_Instance& p;//the PROBLEM we solve; a constant reference, to avoid the pointer syntax //order direction: BWD(feasible tsets:order filters)/FWD(feasible tsets:order ideals) const t_Direction D; 
//$type-$parameters; const std::string slnCode; //FWD-DP, etc. const std::string slnName;//p.instName + "-" + slnCode const std::string logName;//slnName.log const std::string dumpName;//slnName.dump //dedicated logging ofstream; would use cerr for what.log std::ofstream slnLog;//slnName.log std::ofstream slnDump;//slnName.dump t_stopwatch slnTime;//how long did it take to (solve, etc.) std::deque<t_memRec> slnMem; t_memRec baselineMem; t_solution result; //would line up and report in another procedure size_t nStatesTotal{ 0 };//the total number of states, will be computed during solution by this->solve() t_vstLayer layer;//the Bellman Function t_DP(const t_Instance& iproblem, const t_Direction iDIR, const std::string islnCode) : p(iproblem) , D(iDIR) , slnCode(islnCode)//, slnCode(mk_DP_Code(D)) , slnName(iproblem.instName + "-" + slnCode) , logName(slnName + ".log") , dumpName(slnName + ".dump") { //==================INIT==LOG=&=STOPWATCH=====================/ //prep the layer end times vector slnTime.vlayerDone.resize(p.dim + 1); //prep the memory usage counter: get the baseline (without any BF values) baselineMem = t_memRec(); //bind the log file, purge it (further calls would ios_base::append there) slnLog.open(logName, std::ios_base::out); //start counting the time slnTime.start = time(NULL); // slnTime.vlayerDone[0] = myClock::now(); std::ostringstream tmp; tmp << "This is " + slnName + ". 
Started on " << mkTimeStamp(slnTime.start) << "\n" <<"baseline memory use: "<<baselineMem.to_string()<<"\n"; //brag about starting, into log and stdout slnLog << tmp.str(); std::cout << tmp.str(); //write a header into the log and close it for now slnLog << logHeader<<"\n"; slnLog.close(); //-------------------------------------------------------------/ //===================INIT==SLN=&=BF(LAYERS)==DATA==STRUCTURES==/ //prepare the result deque: make sure base and terminal fit in result.resize(p.dim + 2);//dim=proper clusters; 0||1..dim||\trm layer.resize(p.dim + 1);//layers 0..dim; l[dim]=\{(0,\rg{1}{dim})\} // for each ultimate element (BWD: max/they don't send; FWD: min/don't receive) //gE(EmptySet,...): expansions for empty set---the ultimate elements foreach_elt(m, D.gE(EmptySet, p.ord, p.wkOrd), p.dim) { //for each point therein (should be BWD: each exit point FWD: each entry point foreach_point(pt, p.popInfo[m].pfirst, p.popInfo[m].plast) { /*FWD initial conditions: v(x,\{\varnothing\})=extMtCost(base,x,\{\varnothing\}) BWD initial conditions: v(x,\{\varnothing\})=extMtCost(x,terminal,\{\varnothing\})*/ t_cost costIntlCnd = (D == FWD) ? 
(p.f.cExtMt(0, pt, 0, p.cost)) //BWD: had to skip to cost directly to support TD-TSP-cost by KCvH : /*(p.cost[pt][p.dim + 1]);*/(p.f.cExtMt(pt, p.dim + 1, 0, p.cost)); layer[0][0].emplace(std::make_pair(pt, costIntlCnd) ); } //prime cardinality-1 tasksets (ord.Max singletons) layer[1][BIT0 << m].clear();// layer[1] <- \{m\} }//next ultimate element //count the initial states nStatesTotal += layer[0][0].size(); //measure the current memory usage and record that; slnMem.emplace_back(t_memRec(baselineMem)); }//----------DONE----CONSTRUCTION-&---SOLUTION--PRIMING---------/ /*delegate constructor: it “knows” its own SLNCODE function; necessary only for this---base---class*/ t_DP(const t_Instance& iproblem, const t_Direction iDIR) : t_DP(iproblem, iDIR, mk_DP_Code(iDIR)){}; //====================BF====COMPUTATION===========================/ void compExpandBF(const t_bin& K //taskset , const t_bin& EK //its feasible expansions:its interfaces , const t_stLayer& prevL//prev. layer, for computing BF , t_stLayer& thisL//this layer, to fill in the BF's values // , t_stLayer& nextL//next layer, for generating the next taskset(s) , t_2clear& to_clear_nextL)//next layer, for generating the next taskset(s) { foreach_elt(m, EK, p.dim)//for each expanding city { //for each (exit) point of the expanding city foreach_point(x, p.popInfo[m].pfirst, p.popInfo[m].plast) { //find its cost in view of prevL through (BF) and put K->x->cost thisL[K].emplace(x, minmin(x, K, prevL, p, D)); }//next (exit) x from the city m //expand the next layer // to_clear_nextL[omp_get_thread_num()].push_back((K | (BIT0 << m))); // to_clear_nextL[omp_get_thread_num()].insert((K | (BIT0 << m))); to_clear_nextL.insert((K | (BIT0 << m))); }//next expanding city m\in EK return; } //in fact, it would be enough to copy all the elements of to_clear into nextL as keys (with empty values) //though there is little reason to believe much performance is lost here //since the complexity of .clear() on already empty 
elements is minuscule static void collect_garbage(t_2clear& to_clear, t_stLayer& nextL) { for (auto &thread_queue : to_clear) { nextL[thread_queue].clear(); // for (auto key : thread_queue.second) { // nextL[key].clear(); // } } } t_solution solve() { for (mtag l = 1; l < p.dim; l++)//for layers 1..dim-1; { t_2clear to_clear; //for each task set (ideal/filter) of cardinality l size_t nStates = 0;//to count the states at this layer //=================OMP=========TASKS====================/ #pragma omp parallel default (shared) #pragma omp single nowait for (auto ts : layer[l])//implemented as ts.first; .second is for the states { /*recall: FWD:coMin, BWD:coMax t_bin fexp = D.gE(ts.first, p.ord, p.wkOrd);*/ //compute (BF) for all (x,K): x\in fexp, K=ts.first; #pragma omp task untied firstprivate(ts) { compExpandBF(ts.first , D.gE(ts.first, p.ord, p.wkOrd) , layer[l - 1] , layer[l] // , layer[l + 1] , to_clear); nStates += layer[l][ts.first].size();//count the states associated with ts.first } }//next task set (filter) #pragma omp taskwait collect_garbage(to_clear, layer[l+1]); //-----------OMP-------------TASKS-------DONE-----------/ //all current-layer states' values computed; all current-layer tasksets expanded. { nStatesTotal += nStates;//add this layer's state count to the total slnTime.vlayerDone[l] = myClock::now();//get the current time //measure current memory use /w respect to last recorded auto memUse = t_memRec(); //record it slnMem.emplace_back(memUse); //brag(layerNumber:wall-clock:delta:worstState:bestState) slnLog.open(logName, std::ios_base::app); slnLog << mkReportL(l, time(NULL), slnTime._rel_time(0, l), slnTime._rel_time(l - 1, l), layer[l].size(), t_stateDesc(), t_stateDesc(), memUse, nStates) << "\n"; slnLog.close(); } }//next layer //done the ordinary layers; proceed to the complete problem BWD:(0,\rg{1}{dim})/FWD:(\trm,\rg{1}{dim}) //FWD: \trm==p.dim+1; BWD: (the) base==0 ptag lastIntf = (D == FWD) ? 
(p.dim + 1) : (0); layer[p.dim].begin()->second.emplace(lastIntf, minmin(lastIntf, p.wkOrd.omask, layer[p.dim - 1],p,D)); t_stateDesc full{ layer[p.dim].begin()->first //taskset==omask , layer[p.dim].begin()->second.begin()->first //FWD: \trm, BWD: 0 , layer[p.dim].begin()->second.begin()->second };//the whole problem's cost //////////////////////////////////////////////////////////////////////////////////////////////////////// //time this doing slnTime.vlayerDone[p.dim] = myClock::now(); auto bfEndTime_t = time(NULL); //measure current memory use auto memUse = t_memRec(); //record it slnMem.emplace_back(memUse); //log the event slnLog.open(logName, std::ios_base::app); slnLog << mkReportL(p.dim , bfEndTime_t , slnTime._rel_time(0, p.dim) , slnTime._rel_time(p.dim - 1, p.dim) , layer[p.dim].size(), full, full , memUse , this->nStatesTotal) << "\n"; //it's the last layer, tell about the total states' number slnLog.close(); //----------BF----DONE----------------------------------/ //============REPORTS,==RECOVERY,==ETC.=================/ //auto wat = iminmin(full, layer[p.dim - 1], p, D); slnLog.open(logName, std::ios_base::app); slnLog << "\n" << "BF DONE: " << mkTimeStamp(bfEndTime_t) << "\n" << "TOTAL DURATION: " << mkMsecDurationFull(slnTime._rel_time(0, p.dim)) << "\n" << "TOTAL DURATION IN SECONDS: " << mkMsecDurationBrief(slnTime._rel_time(0, p.dim)) << "\n" << "\n" << "RAM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().physMem) << "\n" << "VM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().virtMem) << "\n" << "TOTAL STATES PROCESSED:" << nStatesTotal << "\n" << "RAM BYTES PER STATE(APPX):"<< int(slnMem.back().physMem / nStatesTotal); //mkDuration(getRelTime(slnTime.vlayerDone[p.dim], slnTime.start)); slnLog.close(); //////////////////////////////////////////////////////////////////////////////////////////////////////// //recover the solution this->result = recoverSln(full,layer,p,D);//can now destroy this->layer //time the event 
slnTime.recoveryDone = myClock::now(); //report the event slnLog.open(logName, std::ios_base::app); slnLog << "\n" << "SOLUTION RECOVERY DONE: " << mkTimeStamp(time(NULL)) << "\n" << "RECOVERY TOOK: " << mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[p.dim], slnTime.recoveryDone)) << "\n" << "BF + RECOVERY DURATION: " << mkMsecDurationFull(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n" << "BF + RECOVERY DURATION IN SECONDS: " << mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n"; slnLog.close(); /////////////////////////////////////////////////////////////////////////////////////////////////////// //====================FINAL==REPORT(DUMP)==================/ //NB:relies on result being written to this->result before slnDump.open(dumpName); slnDump << mkFullDump(p, result,D); slnDump.close(); return this->result; } }; //--------------------------------------------------------------/ //==============RESTRICTED===DP=================================/ /*auxiliary priority queue, for keeping states sorted by value*/ using t_sortaid = //struct t_sortaid : std::priority_queue < t_stateDesc, std::vector<t_stateDesc>, std::less<t_stateDesc> > struct t_sortaid : std::priority_queue < t_stateDesc, std::vector<t_stateDesc>, std::less<t_stateDesc> > { t_sortaid(size_t reserve_size) { this->c.reserve(reserve_size); } }; //--------------------------------/ using t_hRtDP = struct t_hRtDP : public t_DP { const uint32_t B;//breadth //everything as it was before, plus initialize the breadth parameter; re-initialize slnCode (sorry!) 
t_hRtDP(const t_Instance& iproblem, const t_Direction iDIR, uint32_t ibreadth) : t_DP(iproblem, iDIR,mk_hRtDP_Code(iDIR,ibreadth)) , B(ibreadth){ }; //calculate the cost of states (x,K), where x\in EK; keep only B best in pri_queue wpl inline void cacheRtBF( const t_bin& K , const t_bin& EK , const t_stLayer& prevL , t_sortaid& wpl ) const //side-effect the workplace, but doesn't change the object //relies on this->B,D,p.dim,ord,wkOrd,popInfo,cost; { foreach_elt(m, EK, p.dim)//for each expanding city { //for each (exit) point of the expanding city foreach_point(x, p.popInfo[m].pfirst, p.popInfo[m].plast) { t_cost xK_cst = minmin(x, K, prevL,p,D);//find its cost in view of prevL through (BF) // t_stateDesc newSt{ K, x, xK_cst }; if (wpl.size() < this->B)//breadth not exceeded wpl.emplace(K, x, xK_cst); else//breadth exceeded { //if (x,K) is better than the worst retained, wpl.top(); wired through t_stateDesc.operator< /*(x,K) is better than the worst retained or (tie-breaker) the cost is the same but new intf (x) is less than the known or cost is the same and intf (x) is the same but task set (K) is less than known*/ if ( wpl.top() > t_stateDesc(K,x,xK_cst ) )//(x,K) is better than the worst retained { //test if it's already there? nah, K's are disjoint! 
wpl.pop();//expel the worser one //wpl.emplace(t_stateDesc{ K, x, xK_cst });//add the newly found wpl.emplace(K, x, xK_cst); } } }//next (exit) x from the city m }//next expanding city m\in EK return; } t_solution solve() { for (mtag l = 1; l < p.dim; l++)//for layers 1..dim-1; { //there are at most B best states by definition; reserve B "cells" in advance t_sortaid bestStates(this->B); //for each task set (ideal/filter) of cardinality l for (auto ts : layer[l])//implemented as ts.first; .second is for the states { { /*recall: FWD:coMin, BWD:coMax t_bin fexp = D.gE(ts.first, p.ord, p.wkOrd);*/ //compute (BF) for all (x,K): x\in fexp, K=ts.first; keep B best cacheRtBF( ts.first , D.gE(ts.first, p.ord, p.wkOrd) , layer[l - 1] , bestStates); }//END::OMP_TASK }//next task set (filter) { //all current-layer states' values computed; best B remain in bestStates; auto wSt = bestStates.top(); //one---the best---must remain to be reported while (bestStates.size() > 1)//write to permanent structure; expand the corresponding taskset { auto st = bestStates.top(); bestStates.pop(); layer[l + 1][st.K | (BIT0 << p.cityof[st.x])].clear();//make a next-layer taskset layer[l][st.K].emplace(st.x, st.v);//write v(st.K,st.inf)=st.v } //copy the best state for report auto bSt = bestStates.top(); //write the best state's information into the next layer layer[l + 1][bSt.K | (BIT0 << p.cityof[bSt.x])].clear();//make a next-layer taskset layer[l][bSt.K].emplace(bSt.x, bSt.v);//write v(bSt.K,bSt.inf)=bSt.v slnTime.vlayerDone[l] = myClock::now();// time(NULL);//get the current time //measure current memory use /w respect to last recorded auto memUse = t_memRec(); //record it slnMem.emplace_back(memUse); //brag(layerNumber:wall-clock:delta:worstState:bestState) slnLog.open(logName, std::ios_base::app); slnLog << mkReportL(l, time(NULL), slnTime._rel_time(0, l), slnTime._rel_time(l - 1, l), layer[l].size(), bSt, wSt, memUse) << "\n"; slnLog.close(); }//END::OMP_MASTER }//next layer //done the 
ordinary layers; proceed to the complete problem BWD:(0,\rg{1}{dim})/FWD:(\trm,\rg{1}{dim}) //FWD: \trm==p.dim+1; BWD: (the) base==0 ptag lastIntf = (D == FWD) ? (p.dim + 1) : (0); layer[p.dim].begin()->second.emplace(lastIntf, minmin(lastIntf, p.wkOrd.omask, layer[p.dim - 1], p, D)); t_stateDesc full{ layer[p.dim].begin()->first //taskset==omask , layer[p.dim].begin()->second.begin()->first //FWD: \trm, BWD: 0 , layer[p.dim].begin()->second.begin()->second };//the whole problem's cost //////////////////////////////////////////////////////////////////////////////////////////////////////// //time this doing slnTime.vlayerDone[p.dim] = myClock::now(); auto bfEndTime_t = time(NULL); //measure current memory use auto memUse = t_memRec(); //record it slnMem.emplace_back(memUse); //log the event slnLog.open(logName, std::ios_base::app); slnLog << mkReportL(p.dim , bfEndTime_t , slnTime._rel_time(0, p.dim) , slnTime._rel_time(p.dim - 1, p.dim) , layer[p.dim].size(), full, full , memUse) << "\n"; slnLog.close(); //----------BF----DONE----------------------------------/ //============REPORTS,==RECOVERY,==ETC.=================/ //auto wat = iminmin(full, layer[p.dim - 1], p, D); slnLog.open(logName, std::ios_base::app); slnLog << "\n" << "BF DONE: " << mkTimeStamp(bfEndTime_t) << "\n" << "TOTAL DURATION: " << mkMsecDurationFull(slnTime._rel_time(0, p.dim)) << "\n" << "TOTAL DURATION IN SECONDS: " << mkMsecDurationBrief(slnTime._rel_time(0, p.dim)) << "\n" << "\n" << "RAM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().physMem) << "\n" << "VM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().virtMem); slnLog.close(); //////////////////////////////////////////////////////////////////////////////////////////////////////// //recover the solution this->result = recoverSln(full,layer, p, D);//can now destroy this->layer //time the event slnTime.recoveryDone = myClock::now(); //report the event slnLog.open(logName, std::ios_base::app); slnLog << "\n" << "SOLUTION 
RECOVERY DONE: " << mkTimeStamp(time(NULL)) << "\n" << "RECOVERY TOOK: " << mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[p.dim], slnTime.recoveryDone)) << "\n" << "BF + RECOVERY DURATION: " << mkMsecDurationFull(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n" << "BF + RECOVERY DURATION IN SECONDS: " << mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n"; slnLog.close(); ////////////////////////////////////////////////////////////////////////////////////////////////////// //====================FINAL==REPORT(DUMP)==================/ //NB:relies on result being written to this->result before slnDump.open(dumpName); slnDump << mkFullDump(p,result,D); //slnDump << std::endl << mkRtDump(p, result); slnDump.close(); return this->result; } }; //----------------------------------------------------------------------------/ #endif
mp4.c
//Transpose with critical section and no locks
#include<stdio.h>
#include<time.h>
#include<omp.h>

/*
 * Demo program: builds two 5x5 integer matrices (auto-filled "express" mode
 * or user-entered "custom" mode), transposes the first one in place using
 * OpenMP threads with a critical section guarding each swap, then computes
 * the element-wise sum of the transposed first matrix and the second matrix.
 *
 * Returns 0 on completion.
 */
int main(void)
{
    int a[5][5], b[5][5], c[5][5], ch;

    printf("Menu\n1.Express Mode\n2.Custom Mode\n");
    printf("Enter your choice:");
    if (scanf("%d", &ch) != 1) {
        ch = 1;  /* fall back to express mode on unreadable input */
    }

    if (ch == 1) {
        /* Express mode: a = 1..25 row-major, b = all ones. */
        int l = 1;
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < 5; j++) {
                a[i][j] = l;
                b[i][j] = 1;
                l++;
            }
        }
    } else {
        /* Custom mode: read both matrices element by element. */
        int k = 1;
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < 5; j++) {
                printf("Enter element %d of first matrix:", k);
                if (scanf("%d", &a[i][j]) != 1) a[i][j] = 0;
                k++;
            }
        }
        k = 1;
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < 5; j++) {
                printf("Enter element %d of second matrix:", k);
                if (scanf("%d", &b[i][j]) != 1) b[i][j] = 0;
                k++;
            }
        }
    }

    printf("\nThe First Matrix is:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }

    printf("\nThe Second Matrix is:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", b[i][j]);
        }
        printf("\n");
    }

    clock_t begin = clock();

    /* In-place transpose: each (i,j) with j < i is swapped with (j,i)
       exactly once across all threads. */
    #pragma omp parallel num_threads(5)
    {
        #pragma omp for
        for (int i = 0; i < 5; i++) {
            int id = omp_get_thread_num();
            for (int j = 0; j < i; j++) {
                /* BUG FIX: the original used `#pragma omp atomic` on a
                   compound block, which is invalid OpenMP (atomic guards a
                   single scalar update only) and shared `temp` across
                   threads.  A critical section — as the file's title
                   comment promises — correctly serializes the 3-statement
                   swap, and `temp` is now thread-local. */
                #pragma omp critical
                {
                    int temp = a[i][j];
                    a[i][j] = a[j][i];
                    a[j][i] = temp;
                }
            }
            printf("Thread %d\n", id);
        }
    }

    printf("\nTranspose of First Matrix:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }

    /* Element-wise sum c = a + b; iterations are independent, no sync needed. */
    #pragma omp parallel num_threads(5)
    {
        #pragma omp for
        for (int i = 0; i < 5; i++) {
            int id = omp_get_thread_num();
            for (int j = 0; j < 5; j++) {
                c[i][j] = a[i][j] + b[i][j];
            }
            printf("Thread %d\n", id);
        }
    }

    clock_t end = clock();
    /* BUG FIX: the original printed seconds with an "ms" label; convert
       clock ticks to milliseconds so the value matches the unit. */
    double time_spent = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC;
    printf("CPU Time used = %lfms", time_spent);

    printf("\nSum Matrix Is:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", c[i][j]);
        }
        printf("\n");
    }

    return 0;
}
THOmpLabConv.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THOmpLabConv.c"
#else

/* 3D input, 3D kernel, 4D output
   like rank1 update
   A <- xx' + beta*A
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1

   Resizes r_ to (nKernelPlane, nInputPlane, nOutputRows, nOutputCols).
   beta scales r_'s prior contents (0 => overwrite, 1 => accumulate);
   alpha scales the correlation contribution.  Parallelized with OpenMP
   over kernel planes, so each thread owns a disjoint slab of r_.  */
void THOmpLab_(conv2DRevger)(THTensor *r_, real beta, real alpha,
                             THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  /* Work on contiguous copies (or retained originals) so flat pointer
     arithmetic below is valid.  */
  THTensor *input = THTensor_(newContiguous)(t_);
  THTensor *kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  nOutputPlane = nInputPlane * kernel->size[0];

  /* NOTE(review): "covn2DRevger" in this message is a typo for
     "conv2DRevger" in the original source; left untouched here since the
     string is runtime-visible.  */
  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2,
             "covn2DRevger : Input image is smaller than kernel");

  /* "Valid"-style output extent for a kernel applied with stride srow/scol.  */
  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;

  /* Element count BEFORE the resize: if the resize changes it, the old
     contents are meaningless and r_ must be zeroed regardless of beta.  */
  long nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(kernel);
  real *output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
    /* Hand-rolled parallel zero over all (kernel, input) plane pairs.  */
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
    /* Parallel in-place scale of the existing accumulator.  */
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows
                         + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution */
      THLab_(validXCorr2DRevptr)(ptr_output, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 3D kernel, 4D output
   like rank1 update
   A <- xx' + beta*A
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1

   Batched variant: input and kernel are 4D (batch first); contributions of
   all batch items p are accumulated into the same (kernel-plane, input-plane)
   output slab.  Batch sizes of input and kernel must match.  */
void THOmpLab_(conv2DRevgerm)(THTensor *r_, real beta, real alpha,
                              THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nbatch, nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0, istride1, kstride1;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  THTensor *input = THTensor_(newContiguous)(t_);
  THTensor *kernel = THTensor_(newContiguous)(k_);

  istride0 = input->stride[0];
  istride1 = input->stride[1];
  nbatch = input->size[0];
  nInputPlane = input->size[1];
  nInputRows = input->size[2];
  nInputCols = input->size[3];

  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelPlane = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2,
             "conv2DRevger : Input image is smaller than kernel");
  THArgCheck(kernel->size[0] == input->size[0] , 2,
             "conv2DRevger : Input batch and kernel batch is not same size");

  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;

  /* See conv2DRevger: element count before resize decides zero vs scale.  */
  long nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(kernel);
  real *output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    for(i = 0; i < nInputPlane; i++)
    {
      long p;
      /* Batch loop is innermost and sequential per (k, i): all p accumulate
         into the same output slab, so it must not be parallelized.  */
      for(p = 0; p < nbatch; p++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + p*kstride0 + k*kstride1;
        /* get output */
        real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows
                           + i*nOutputCols*nOutputRows;
        /* get input */
        real *ptr_input = input_data + p*istride0 + i*istride1;

        /* do image, kernel convolution */
        THLab_(validXCorr2DRevptr)(ptr_output, alpha,
                                   ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols,
                                   srow, scol);
        /* Next output plane */
        /* output_data += nOutputCols*nOutputRows; */
      }
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 3D kernel, 4D output
   like rank1 update
   A <- xx' + beta*A

   type[0] selects 'f'ull or 'v'alid extent; type[1] selects 'x'-correlation
   or 'c'onvolution (flipped kernel).  Parallel over kernel planes.  */
void THOmpLab_(conv2Dger)(THTensor *r_, real beta, real alpha,
                          THTensor *t_, THTensor *k_,
                          long srow, long scol, const char *type)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(type[0] == 'v' || type[0] == 'f', 7,
             "type of convolution can 'v' or 'f'");
  THArgCheck(type[1] == 'c' || type[1] == 'x', 7,
             "type of convolution can 'x' or 'c'");

  THTensor *input = THTensor_(newContiguous)(t_);
  THTensor *kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  nOutputPlane = nInputPlane * kernel->size[0];

  /* 'full' mode never under-runs, so the size check only applies to 'valid'.  */
  THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols)
             || *type == 'f', 2,
             "conv2Dger : Input image is smaller than kernel");

  if (*type == 'f') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { // valid
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  long nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(kernel);
  real *output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows
                         + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution: dispatch on (full|valid, xcorr|conv) */
      if (type[0] == 'f')
        if (type[1] == 'x')
          THLab_(fullXCorr2Dptr)(ptr_output, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
        else
          THLab_(fullConv2Dptr)(ptr_output, alpha,
                                ptr_input, nInputRows, nInputCols,
                                ptr_weight, nKernelRows, nKernelCols,
                                srow, scol);
      else
        if (type[1] == 'x')
          THLab_(validXCorr2Dptr)(ptr_output, alpha,
                                  ptr_input, nInputRows, nInputCols,
                                  ptr_weight, nKernelRows, nKernelCols,
                                  srow, scol);
        else
          THLab_(validConv2Dptr)(ptr_output, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 4D kernel, 3D output
   matrix vector product like
   y <- Ax + beta*y

   Each of the nOutputPlane output maps sums the convolutions of all input
   planes against its kernel slice.  Parallel over output planes.  */
void THOmpLab_(conv2Dmv)(THTensor *r_, real beta, real alpha,
                         THTensor *t_, THTensor *k_,
                         long srow, long scol, const char *type)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0, kstride1;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(type[0] == 'v' || type[0] == 'f', 7,
             "type of convolution can 'v' or 'f'");
  THArgCheck(type[1] == 'c' || type[1] == 'x', 7,
             "type of convolution can 'x' or 'c'");

  THTensor *input = THTensor_(newContiguous)(t_);
  /* Copy the kernel only when its innermost two dims are not already
     contiguous; otherwise just retain it (strides 0/1 may be arbitrary
     since they are applied explicitly below).  */
  THTensor* kernel;
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];

  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2,
             "invalid number of input planes");

  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols)
              || *type == 'f', 2,
              "conv2Dmv : Input image is smaller than kernel");

  if (*type == 'f') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { // valid
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  long nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);

  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(kernel);
  real *output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++)
  {
    long i;
    /* get output */
    real *ptr_output = output_data + k*nOutputCols*nOutputRows;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get kernel */
      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
      real *ptr_input = input_data + i*istride0;

      /* do image, kernel convolution */
      if (type[0] == 'f')
        if (type[1] == 'x')
          THLab_(fullXCorr2Dptr)(ptr_output, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
        else
          THLab_(fullConv2Dptr)(ptr_output, alpha,
                                ptr_input, nInputRows, nInputCols,
                                ptr_weight, nKernelRows, nKernelCols,
                                srow, scol);
      else
        if (type[1] == 'x')
          THLab_(validXCorr2Dptr)(ptr_output, alpha,
                                  ptr_input, nInputRows, nInputCols,
                                  ptr_weight, nKernelRows, nKernelCols,
                                  srow, scol);
        else
          THLab_(validConv2Dptr)(ptr_output, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
    }
    /* Next output plane */
    /* output_data += nOutputCols*nOutputRows;*/
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D (batched) input, 4D kernel, 4D output
   batched matrix vector product like
   y <- Ax + beta*y

   Same as conv2Dmv applied independently to every batch item; parallel over
   the batch dimension.  */
void THOmpLab_(conv2Dmm)(THTensor *r_, real beta, real alpha,
                         THTensor *t_, THTensor *k_,
                         long srow, long scol, const char *type)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long kstride0, kstride1;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(type[0] == 'v' || type[0] == 'f', 7,
             "type of convolution can 'v' or 'f'");
  THArgCheck(type[1] == 'c' || type[1] == 'x', 7,
             "type of convolution can 'x' or 'c'");

  THTensor *input = THTensor_(newContiguous)(t_);
  /* Same contiguity shortcut as conv2Dmv.  */
  THTensor* kernel;
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  long nbatch = input->size[0];
  nInputPlane = input->size[1];
  nInputRows = input->size[2];
  nInputCols = input->size[3];

  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2,
             "invalid number of input planes");

  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols)
              || *type == 'f', 2,
              "conv2Dmv : Input image is smaller than kernel");

  if (*type == 'f') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { // valid
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  long nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);

  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(kernel);
  real *output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
    long p;
#pragma omp parallel for private(p)
    for (p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols
                           + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] = 0.0;
      }
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
    long p;
#pragma omp parallel for private(p)
    for (p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols
                           + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] *= beta;
      }
    }
  }

  long p;
#pragma omp parallel for private(p)
  for (p=0; p < nbatch; p++)
  {
    long k;
    for(k = 0; k < nOutputPlane; k++)
    {
      long i;
      /* get output */
      real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows
                         + k*nOutputCols*nOutputRows;
      for(i = 0; i < nInputPlane; i++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
        /* get input */
        real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols
                          + i*nInputRows*nInputCols;

        /* do image, kernel convolution */
        if (type[0] == 'f')
          if (type[1] == 'x')
            THLab_(fullXCorr2Dptr)(ptr_output, alpha,
                                   ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols,
                                   srow, scol);
          else
            THLab_(fullConv2Dptr)(ptr_output, alpha,
                                  ptr_input, nInputRows, nInputCols,
                                  ptr_weight, nKernelRows, nKernelCols,
                                  srow, scol);
        else
          if (type[1] == 'x')
            THLab_(validXCorr2Dptr)(ptr_output, alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
          else
            THLab_(validConv2Dptr)(ptr_output, alpha,
                                   ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols,
                                   srow, scol);
      }
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows;*/
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

#endif
__clang_openmp_device_functions.h
/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__

#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* NVPTX targets: reuse the CUDA libdevice wrappers by temporarily pretending
   to be a CUDA compilation (__CUDA__) inside an OpenMP variant region.  */
#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

#define __CUDA__
#define __OPENMP_NVPTX__

/// Include declarations for libdevice functions.
#include <__clang_cuda_libdevice_declares.h>
/// Provide definitions for these functions.
#include <__clang_cuda_device_functions.h>

#undef __OPENMP_NVPTX__
#undef __CUDA__

#pragma omp end declare variant

/* AMD GPU targets: same trick with the HIP libdevice declarations.  */
#ifdef __AMDGCN__
#pragma omp begin declare variant match(device = {arch(amdgcn)})

// Import types which will be used by __clang_hip_libdevice_declares.h
#ifndef __cplusplus
#include <stdbool.h>
#include <stdint.h>
#endif

#define __OPENMP_AMDGCN__
/* Neutralize the HIP __device__ attribute for the duration of the include;
   push/pop so any user definition of the macro is preserved.  */
#pragma push_macro("__device__")
#define __device__

/// Include declarations for libdevice functions.
#include <__clang_hip_libdevice_declares.h>

#pragma pop_macro("__device__")
#undef __OPENMP_AMDGCN__

#pragma omp end declare variant
#endif

#ifdef __cplusplus
} // extern "C"
#endif

// Ensure we make `_ZdlPv`, aka. `operator delete(void*)` available without the
// need to `include <new>` in C++ mode.
#ifdef __cplusplus

// We require malloc/free.
#include <cstdlib>

#pragma push_macro("OPENMP_NOEXCEPT")
#if __cplusplus >= 201103L
#define OPENMP_NOEXCEPT noexcept
#else
#define OPENMP_NOEXCEPT
#endif

// Device overrides for non-placement new and delete.
// Note: new(0) returns a unique 1-byte allocation, per the usual contract.
inline void *operator new(__SIZE_TYPE__ size) {
  if (size == 0)
    size = 1;
  return ::malloc(size);
}

inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }

inline void operator delete(void *ptr)OPENMP_NOEXCEPT { ::free(ptr); }

inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
  ::operator delete(ptr);
}

// Sized delete, C++14 only.
#if __cplusplus >= 201402L
inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
  ::operator delete(ptr);
}

inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
  ::operator delete(ptr);
}
#endif

#pragma pop_macro("OPENMP_NOEXCEPT")
#endif

#endif
SymOp.h
/*
 * Packed-symmetric <-> dense 3x3 conversion for n matrices.
 *
 * Packing convention (per matrix i):
 *   x[6i .. 6i+5] = (m00, m01, m02, m11, m12, m22)  -- upper triangle,
 * row-major.  forward() scatters each packed 6-vector into a dense row-major
 * 3x3 block y[9i .. 9i+8]; backward() gathers the gradient wrt y back into
 * packed form, summing the two copies of each off-diagonal element.
 */

/* y[9n] <- dense symmetric matrices built from packed x[6n]. */
void forward(double *y, const double *x, int n){
    #pragma omp parallel for
    for(int i=0;i<n;i++){
        const double *s = x + 6*i;   /* packed source */
        double *d = y + 9*i;         /* dense destination */
        d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
        d[3] = s[1]; d[4] = s[3]; d[5] = s[4];
        d[6] = s[2]; d[7] = s[4]; d[8] = s[5];
    }
}

/* grad_x[6n] <- packed gradient gathered from grad_y[9n].
   y and x are unused; the parameters are kept for interface compatibility. */
void backward(double *grad_x, const double *grad_y,
              const double *y, const double *x, int n){
    (void)y;
    (void)x;
    /* FIX: parallelized to match forward(); iterations are independent and
       write disjoint 6-element slices of grad_x. */
    #pragma omp parallel for
    for(int i=0;i<n;i++){
        const double *g = grad_y + 9*i;
        double *d = grad_x + 6*i;
        d[0] = g[0];
        d[1] = g[1] + g[3];  /* m01 appears at (0,1) and (1,0) */
        d[2] = g[2] + g[6];  /* m02 appears at (0,2) and (2,0) */
        d[3] = g[4];
        d[4] = g[5] + g[7];  /* m12 appears at (1,2) and (2,1) */
        d[5] = g[8];
    }
}
floyd_gpu.c
/*
  All-pairs shortest paths (Floyd-Warshall) between the k neighbors of a
  generated Cartesian map, computed on CPU and GPU (OpenMP target offload)
  and compared.

  TODO(review): investigate why SIZE >= 419 used to trigger a segmentation
  fault (original note, in Portuguese, preserved here in translation).

  Author: Gleison Souza Diniz Mendonça
  Date: 04-01-2015
  version 1.0

  Run:
  folder_ipmacc/ipmacc folder_archive/k-nearest.c
  ./a.out
*/

#include "BenchmarksUtil.h"
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif

#define ERROR_THRESHOLD 0.05

/* Sentinel meaning "no edge / distance unknown" (effectively infinity). */
#define NO_EDGE 99999999

/// initialize the cartesian map
/// matrix gets pseudo-random edge weights; both distance matrices start at
/// the NO_EDGE sentinel.
void init(int *matrix, int *matrix_dist_cpu, int *matrix_dist_gpu) {
  int i, j, r, m;
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      matrix[i * SIZE + j] = NO_EDGE;
      matrix_dist_cpu[i * SIZE + j] = NO_EDGE;
      matrix_dist_gpu[i * SIZE + j] = NO_EDGE;
    }
  }

  /* Deterministic pseudo-random sparsity pattern and weights. */
  for (i = 0; i < SIZE; i++) {
    r = (i * 97) % SIZE;
    for (j = 0; j < r; j++) {
      m = (((j * 1021) * 71 % (SIZE * SIZE)) + 1);
      matrix[i * SIZE + j] = m;
      if (i == j) {
        matrix[i * SIZE + j] = 0;
      }
    }
  }
}

/// Floyd-Warshall relaxation on the GPU (OpenMP target offload).
/// matrix: input edge weights; matrix_dist: resulting shortest distances.
void Knearest_GPU(int *matrix, int *matrix_dist) {
  int i, j, k;

  /* Seed distances with direct edges; diagonal is zero. */
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      if (matrix[i * SIZE + j] != NO_EDGE) {
        matrix_dist[i * SIZE + j] = matrix[i * SIZE + j];
      }
    }
    matrix_dist[i * SIZE + i] = 0;
  }

  /// opportunity of parallelism here
#pragma omp target map(tofrom : matrix_dist[ : SIZE *SIZE]) device(DEVICE_ID)
  {
    /* The pivot loop over i must stay sequential (Floyd-Warshall). */
    for (i = 0; i < SIZE; i++) {
      /* BUG FIX: 'j' is declared at function scope, so without private(j)
         it was SHARED among the threads of this parallel for — a data race
         that corrupted the inner loop.  Only the loop variable 'k' is
         implicitly private. */
#pragma omp parallel for private(j)
      for (k = 0; k < SIZE; k++) {
        for (j = 0; j < SIZE; j++) {
          if (matrix_dist[k * SIZE + i] != NO_EDGE &&
              matrix_dist[i * SIZE + j] != NO_EDGE &&
              matrix_dist[k * SIZE + j] >
                  matrix_dist[k * SIZE + i] + matrix_dist[i * SIZE + j]) {
            matrix_dist[k * SIZE + j] =
                matrix_dist[k * SIZE + i] + matrix_dist[i * SIZE + j];
          }
        }
      }
    }
  }
}

/// Reference sequential Floyd-Warshall on the CPU.
void Knearest_CPU(int *matrix, int *matrix_dist) {
  int i, j, k;
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      if (matrix[i * SIZE + j] != NO_EDGE) {
        matrix_dist[i * SIZE + j] = matrix[i * SIZE + j];
      }
    }
    matrix_dist[i * SIZE + i] = 0;
  }

  /// opportunity of parallelism here
  for (i = 0; i < SIZE; i++) {
    for (k = 0; k < SIZE; k++) {
      for (j = 0; j < SIZE; j++) {
        if (matrix_dist[k * SIZE + i] != NO_EDGE &&
            matrix_dist[i * SIZE + j] != NO_EDGE &&
            matrix_dist[k * SIZE + j] >
                matrix_dist[k * SIZE + i] + matrix_dist[i * SIZE + j]) {
          matrix_dist[k * SIZE + j] =
              matrix_dist[k * SIZE + i] + matrix_dist[i * SIZE + j];
        }
      }
    }
  }
}

/* Count entries where CPU and GPU results differ beyond ERROR_THRESHOLD
   percent (percentDiff comes from BenchmarksUtil.h).  Returns the count. */
int compareResults(int *B, int *B_GPU) {
  int i, j, fail;
  fail = 0;

  // Compare B and B_GPU
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      if (percentDiff(B[i * SIZE + j], B_GPU[i * SIZE + j]) > ERROR_THRESHOLD) {
        fail++;
      }
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
  return fail;
}

int main(int argc, char *argv[]) {
  double t_start, t_end;
  int fail = 0;

  int *matrix;
  int *matrix_dist_cpu, *matrix_dist_gpu;

  fprintf(stdout, "<< K-nearest GPU >>\n");

  matrix = (int *)malloc(sizeof(int) * SIZE * SIZE);
  matrix_dist_cpu = (int *)malloc(sizeof(int) * SIZE * SIZE);
  matrix_dist_gpu = (int *)malloc(sizeof(int) * SIZE * SIZE);
  /* FIX: bail out on allocation failure instead of dereferencing NULL. */
  if (!matrix || !matrix_dist_cpu || !matrix_dist_gpu) {
    fprintf(stderr, "allocation failed\n");
    free(matrix);
    free(matrix_dist_cpu);
    free(matrix_dist_gpu);
    return 1;
  }

  init(matrix, matrix_dist_cpu, matrix_dist_gpu);

  t_start = rtclock();
  Knearest_GPU(matrix, matrix_dist_gpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  Knearest_CPU(matrix, matrix_dist_cpu);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(matrix_dist_cpu, matrix_dist_gpu);
#endif

  free(matrix);
  free(matrix_dist_cpu);
  free(matrix_dist_gpu);

  return fail;
}
tree-ssa-loop-ivcanon.c
/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that in the case the new variable is not needed, ivopts will get rid
   of it, so it might only be a problem when there are no other linear induction
   variables.  In that case the created optimization possibilities are likely
   to pay up.

   We also perform
     - complete unrolling (or peeling) when the loop is rolling few enough
       times
     - simple peeling (i.e. copying few initial iterations prior to the loop)
       when number of iteration estimate is known (typically by the profile
       info).
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "profile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-inline.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"

/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};

/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  The ssa versions of the new
   IV before and after increment will be stored in VAR_BEFORE and VAR_AFTER
   if they are not NULL.  The IV counts NITER+1 .. 1 with step -1 and the
   exit condition becomes a compare against 0.  */

void
create_canonical_iv (struct loop *loop, edge exit, tree niter,
		     tree *var_before = NULL, tree *var_after = NULL)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  /* IN is the edge staying inside the loop (the non-exit successor).  */
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with a modulo arithmetics.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, var_before, &var);
  if (var_after)
    *var_after = var;

  /* Pick EQ/NE depending on which edge of the condition exits the loop.  */
  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}

/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop  (i.e. computation based on induction
     variable where induction variable starts at known constant.)  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;
  
  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};

/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
{
  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First make fast look if we see constant array inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && ctor_for_folding (base) != error_mark_node)
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables are constants when defined in loop.  */
  if (loop_containing_stmt (stmt) != loop)
    return false;
  /* A scalar evolution fully determined inside LOOP (no unknowns, no outer
     symbols) evaluates to a constant once the iteration number is fixed.  */
  tree ev = analyze_scalar_evolution (loop, op);
  if (chrec_contains_undetermined (ev)
      || chrec_contains_symbols (ev))
    return false;
  return true;
}

/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but last
   iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is an non-exit edge eliminated in the last
   iteration of loop.
   Return results in SIZE, estimate benefits for complete unrolling exiting
   by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.
*/

static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Blocks strictly dominated by EDGE_TO_CANCEL's source only run in
	 iterations that take that edge, i.e. not in the last iteration.  */
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  if (!gimple_has_side_effects (stmt))
	    {
	      /* Exit conditional.  */
	      if (exit && body[i] == exit->src
		  && stmt == last_stmt (exit->src))
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, " Exit condition will be eliminated "
			     "in peeled copies.\n");
		  likely_eliminated_peeled = true;
		}
	      if (edge_to_cancel && body[i] == edge_to_cancel->src
		  && stmt == last_stmt (edge_to_cancel->src))
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, " Exit condition will be eliminated "
			     "in last copy.\n");
		  likely_eliminated_last = true;
		}
	      /* Sets of IV variables.  */
	      if (gimple_code (stmt) == GIMPLE_ASSIGN
		  && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, " Induction variable computation will"
			     " be folded away.\n");
		  likely_eliminated = true;
		}
	      /* Assignments of IV variables.  */
	      else if (gimple_code (stmt) == GIMPLE_ASSIGN
		       && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		       && constant_after_peeling (gimple_assign_rhs1 (stmt),
						  stmt, loop)
		       && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
			   || constant_after_peeling (gimple_assign_rhs2 (stmt),
						      stmt, loop)))
		{
		  size->constant_iv = true;
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file,
			     " Constant expression will be folded away.\n");
		  likely_eliminated = true;
		}
	      /* Conditionals.  */
	      else if ((gimple_code (stmt) == GIMPLE_COND
			&& constant_after_peeling (gimple_cond_lhs (stmt),
						   stmt, loop)
			&& constant_after_peeling (gimple_cond_rhs (stmt),
						   stmt, loop)
			/* We don't simplify all constant compares so make sure
			   they are not both constant already.  See PR70288.  */
			&& (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
			    || ! is_gimple_min_invariant (gimple_cond_rhs (stmt))))
		       || (gimple_code (stmt) == GIMPLE_SWITCH
			   && constant_after_peeling (gimple_switch_index (
							as_a <gswitch *> (stmt)),
						      stmt, loop)
			   && ! is_gimple_min_invariant
				  (gimple_switch_index
				     (as_a <gswitch *> (stmt)))))
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, " Constant conditional.\n");
		  likely_eliminated = true;
		}
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  /* Bail out early once the (optimistically scaled) size estimate
	     already exceeds UPPER_BOUND.  */
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  /* Collect the hot-path statistics used by the unrolling heuristics.  */
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL
	      && !gimple_inexpensive_call_p (as_a <gcall *> (stmt)))
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path ++;
	    }
	  /* Count inexpensive calls as non-calls, because they will likely
	     expand inline.  */
	  else if (gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
						loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);
  free (body);
  return false;
}

/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body with taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.  Loop body is likely going to simplify further, this
   is difficult to guess, we just decrease the result by 1/3.  */

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  /* Account for the expected follow-up simplification: shave 1/3 off.  */
  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be remove to make the loop to always exit and at
   the same time it does not make any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite common case for loops of form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from induction variable.

   For now we handle only simple case where there is exit condition
   just before the latch block and the latch block contains no statements
   with side effect that may otherwise terminate the execution of loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We only can handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
	 execution without really reaching the exit.  This may include
	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).
*/
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  split_block (gimple_bb (stmt), stmt);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  exit_edge->probability = profile_probability::always ();
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  /* Fold the exit condition so the exit edge is always taken.  */
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Remove all exits that are known to be never taken because of the loop
   bound discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, we can remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	  /* Fold the exit condition so the exit edge is never taken.  */
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Stores loops that will be unlooped and edges that will be removed
   after we process whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
static vec<edge> edges_to_remove;
/* Stores loops that have been peeled.  */
static bitmap peeled_loops;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well as
   SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.
*/

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in; the new block holds only a __builtin_unreachable call since
	 the latch is never reached.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch),
			      flags);
      latch_edge->probability = profile_probability::never ();
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      add_bb_to_loop (latch_edge->dest, current_loops->tree_root);
      latch_edge->dest->count = profile_count::zero ();
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest,
			       latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();

  /* Remove edges in peeled copies.  Given remove_path removes dominated
     regions we need to cope with removal of already removed paths.  */
  unsigned i;
  edge e;
  auto_vec<int, 20> src_bbs;
  src_bbs.reserve_exact (edges_to_remove.length ());
  /* Snapshot the source BB indices first: removing one path may delete
     blocks referenced by later entries.  */
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    src_bbs.quick_push (e->src->index);
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    if (BASIC_BLOCK_FOR_FN (cfun, src_bbs[i]))
      {
	bool ok = remove_path (e, irred_invalidated,
			       loop_closed_ssa_invalidated);
	gcc_assert (ok);
      }
  edges_to_remove.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies bound on number of iterations, -1 if it is not known
   or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */

static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter, bool may_be_zero,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus, bool allow_peel)
{
  unsigned HOST_WIDE_INT n_unroll = 0;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved number of iterations to be low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of execution of loop is determined by standard induction
     variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving
     from the iv test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we can not eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if ((allow_peel || maxiter == 0 || ul == UL_NO_GROWTH)
      && maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we can not
	 remove it in the last iteration.
*/
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (!loop->unroll
      && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Not unrolling loop %d "
		 "(--param max-completely-peel-times limit reached).\n",
		 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      if (ul == UL_SINGLE_ITER)
	return false;

      if (loop->unroll)
	{
	  /* If the unrolling factor is too large, bail out.  */
	  if (n_unroll > (unsigned)loop->unroll)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "Not unrolling loop %d: "
			 "user didn't want it unrolled completely.\n",
			 loop->num);
	      return false;
	    }
	}
      else
	{
	  struct loop_size size;
	  /* EXIT can be removed only if we are sure it passes first N_UNROLL
	     iterations.  */
	  bool remove_exit = (exit && niter
			      && TREE_CODE (niter) == INTEGER_CST
			      && wi::leu_p (n_unroll, wi::to_widest (niter)));
	  bool large
	    = tree_estimate_loop_size
		(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
		 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
	  if (large)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
			 loop->num);
	      return false;
	    }

	  unsigned HOST_WIDE_INT ninsns = size.overall;
	  unsigned HOST_WIDE_INT unr_insns
	    = estimated_unrolled_size (&size, n_unroll);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
	      fprintf (dump_file, " Estimated size after unrolling: %d\n",
		       (int) unr_insns);
	    }

	  /* If the code is going to shrink, we don't need to be extra
	     cautious on guessing if the unrolling is going to be
	     profitable.  */
	  if (unr_insns
	      /* If there is IV variable that will become constant, we
		 save one instruction in the loop prologue we do not
		 account otherwise.  */
	      <= ninsns + (size.constant_iv != false))
	    ;
	  /* We unroll only inner loops, because we do not consider it
	     profitable otherwise.
	     We still can cancel loopback edge of not rolling loop;
	     this is always a good idea.  */
	  else if (ul == UL_NO_GROWTH)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* Outer loops tend to be less interesting candidates for
	     complete unrolling unless we can do a lot of propagation
	     into the inner loop body.  For now we disable outer loop
	     unrolling when the code would grow.  */
	  else if (loop->inner)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "it is not innermost and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* If there is call on a hot path through the loop, then
	     there is most probably not much to optimize.  */
	  else if (size.num_non_pure_calls_on_hot_path)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "contains call and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* If there is pure/const call in the function, then we can
	     still optimize the unrolled loop body if it contains some
	     other interesting code than the calls and code storing or
	     cumulating the return value.  */
	  else if (size.num_pure_calls_on_hot_path
		   /* One IV increment, one test, one ivtmp store and
		      one useful stmt.  That is about minimal loop
		      doing pure call.  */
		   && (size.non_call_stmts_on_hot_path
		       <= 3 + size.num_pure_calls_on_hot_path))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "contains just pure calls and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* Complete unrolling is major win when control flow is removed and
	     one big basic block is created.  If the loop contains control
	     flow the optimization may still be a win because of eliminating
	     the loop overhead but it also may blow the branch predictor
	     tables.  Limit number of branches on the hot path through the
	     peeled sequence.
*/
	  else if (size.num_branches_on_hot_path * (int)n_unroll
		   > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "number of branches on hot path in the unrolled "
			 "sequence reaches --param max-peel-branches limit.\n",
			 loop->num);
	      return false;
	    }
	  else if (unr_insns
		   > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "number of insns in the unrolled sequence reaches "
			 "--param max-completely-peeled-insns limit.\n",
			 loop->num);
	      return false;
	    }
	}

      initialize_original_copy_tables ();
      auto_sbitmap wont_exit (n_unroll + 1);
      if (exit && niter
	  && TREE_CODE (niter) == INTEGER_CST
	  && wi::leu_p (n_unroll, wi::to_widest (niter)))
	{
	  bitmap_ones (wont_exit);
	  if (wi::eq_p (wi::to_widest (niter), n_unroll)
	      || edge_to_cancel)
	    bitmap_clear_bit (wont_exit, 0);
	}
      else
	{
	  exit = NULL;
	  bitmap_clear (wont_exit);
	}
      if (may_be_zero)
	bitmap_clear_bit (wont_exit, 1);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &edges_to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      force_edge_cold (edge_to_cancel, true);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path, as doing so may remove outer loop and
	 confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.
*/
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "loop turned into non-loop; it never loops\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "loop with %d iterations completely unrolled",
			   (int) n_unroll);
	  if (loop->header->count.initialized_p ())
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int)loop->header->count.to_gcov_type ());
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Return number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
				unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
				       - size->eliminated_by_peeling), 1);
}

/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.  Parameters are the same as for
   try_unroll_loops_completely.  */

static bool
try_peel_loop (struct loop *loop,
	       edge exit, tree niter, bool may_be_zero,
	       HOST_WIDE_INT maxiter)
{
  HOST_WIDE_INT npeel;
  struct loop_size size;
  int peeled_size;

  if (!flag_peel_loops
      || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
      || !peeled_loops)
    return false;

  /* Never peel the same loop twice.  */
  if (bitmap_bit_p (peeled_loops, loop->num))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: loop is already peeled\n");
      return false;
    }

  /* We don't peel loops that will be unrolled as this can duplicate a
     loop more times than the user requested.
*/
  if (loop->unroll)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: user didn't want it peeled.\n");
      return false;
    }

  /* Peel only innermost loops.
     While the code is perfectly capable of peeling non-innermost loops,
     the heuristics would probably need some improvements.  */
  if (loop->inner)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    npeel = likely_max_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: number of iterations is not "
		 "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: upper bound is known so can "
		 "unroll completely\n");
      return false;
    }

  /* We want to peel estimated number of iterations + 1 (so we never
     enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: rolls too much "
		 "(%i + 1 > --param max-peel-times)\n", (int) npeel);
      return false;
    }
  npeel++;

  /* Check peeled loops size.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
  if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
		 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  /* Duplicate possibly eliminating the exits.
*/ initialize_original_copy_tables (); auto_sbitmap wont_exit (npeel + 1); if (exit && niter && TREE_CODE (niter) == INTEGER_CST && wi::leu_p (npeel, wi::to_widest (niter))) { bitmap_ones (wont_exit); bitmap_clear_bit (wont_exit, 0); } else { exit = NULL; bitmap_clear (wont_exit); } if (may_be_zero) bitmap_clear_bit (wont_exit, 1); if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), npeel, wont_exit, exit, &edges_to_remove, DLTHE_FLAG_UPDATE_FREQ)) { free_original_copy_tables (); return false; } free_original_copy_tables (); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Peeled loop %d, %i times.\n", loop->num, (int) npeel); } if (loop->any_estimate) { if (wi::ltu_p (npeel, loop->nb_iterations_estimate)) loop->nb_iterations_estimate -= npeel; else loop->nb_iterations_estimate = 0; } if (loop->any_upper_bound) { if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound)) loop->nb_iterations_upper_bound -= npeel; else loop->nb_iterations_upper_bound = 0; } if (loop->any_likely_upper_bound) { if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound)) loop->nb_iterations_likely_upper_bound -= npeel; else { loop->any_estimate = true; loop->nb_iterations_estimate = 0; loop->nb_iterations_likely_upper_bound = 0; } } profile_count entry_count = profile_count::zero (); edge e; edge_iterator ei; FOR_EACH_EDGE (e, ei, loop->header->preds) if (e->src != loop->latch) { if (e->src->count.initialized_p ()) entry_count = e->src->count + e->src->count; gcc_assert (!flow_bb_inside_loop_p (loop, e->src)); } profile_probability p = profile_probability::very_unlikely (); p = entry_count.probability_in (loop->header->count); scale_loop_profile (loop, p, 0); bitmap_set_bit (peeled_loops, loop->num); return true; } /* Adds a canonical induction variable to LOOP if suitable. CREATE_IV is true if we may create a new iv. UL determines which loops we are allowed to completely unroll. 
   If TRY_EVAL is true, we try to determine the number of iterations of a loop
   by direct evaluation.  Returns true if cfg is changed.   */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval, bool allow_peel)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;
  struct tree_niter_desc niter_desc;
  bool may_be_zero = false;

  /* For unrolling allow conditional constant or zero iterations, thus
     perform loop-header copying on-the-fly.  */
  exit = single_exit (loop);
  niter = chrec_dont_know;
  if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false))
    {
      niter = niter_desc.niter;
      may_be_zero
	= niter_desc.may_be_zero && !integer_zerop (niter_desc.may_be_zero);
    }
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location_safe (last_stmt (exit->src));
  else
    {
      /* For non-constant niter fold may_be_zero into niter again.  */
      if (may_be_zero)
	{
	  if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
	    niter = fold_build3 (COND_EXPR, TREE_TYPE (niter),
				 niter_desc.may_be_zero,
				 build_int_cst (TREE_TYPE (niter), 0),
				 niter);
	  else
	    niter = chrec_dont_know;
	  may_be_zero = false;
	}

      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location_safe (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for future.
*/
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && likely_max_loop_iterations_int (loop) >= 0)
    {
      fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
	       loop->num, (int)likely_max_loop_iterations_int (loop));
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after compilation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, may_be_zero, ul,
				  maxiter, locus, allow_peel))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    {
      tree iv_niter = niter;
      /* Guard the canonical IV count with the may-be-zero condition.  */
      if (may_be_zero)
	{
	  if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
	    iv_niter = fold_build3 (COND_EXPR, TREE_TYPE (iv_niter),
				    niter_desc.may_be_zero,
				    build_int_cst (TREE_TYPE (iv_niter), 0),
				    iv_niter);
	  else
	    iv_niter = NULL_TREE;
	}
      if (iv_niter)
	create_canonical_iv (loop, exit, iv_niter);
    }

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, may_be_zero, maxiter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.
*/

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  estimate_numbers_of_iterations (cfun);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true, false);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  free_numbers_of_iterations_estimates (cfun);
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  /* Look for degenerate PHI nodes with constant argument.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
	  && gimple_phi_num_args (phi) == 1
	  && CONSTANT_CLASS_P (arg))
	{
	  replace_uses_by (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.
*/
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_constant
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				bitmap father_bbs, struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;
  unsigned num = number_of_loops (cfun);

  /* Process inner loops first.  Don't walk loops added by the recursive
     calls because SSA form is not up-to-date.  They can be handled in the
     next iteration.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    if ((unsigned) inner->num < num)
      changed |= tree_unroll_loops_completely_1 (may_increase_size,
						 unroll_outer, father_bbs,
						 inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (loop->unroll > 1)
    ul = UL_ALL;
  else if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.
*/ && (unroll_outer || loop_outer (loop_father))) ul = UL_ALL; else ul = UL_NO_GROWTH; if (canonicalize_loop_induction_variables (loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer)) { /* If we'll continue unrolling, we need to propagate constants within the new basic blocks to fold away induction variable computations; otherwise, the size might blow up before the iteration is complete and the IR eventually cleaned up. */ if (loop_outer (loop_father)) bitmap_set_bit (father_bbs, loop_father->header->index); return true; } return false; } /* Unroll LOOPS completely if they iterate just few times. Unless MAY_INCREASE_SIZE is true, perform the unrolling only if the size of the code does not increase. */ static unsigned int tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) { bitmap father_bbs = BITMAP_ALLOC (NULL); bool changed; int iteration = 0; bool irred_invalidated = false; estimate_numbers_of_iterations (cfun); do { changed = false; bitmap loop_closed_ssa_invalidated = NULL; if (loops_state_satisfies_p (LOOP_CLOSED_SSA)) loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL); free_numbers_of_iterations_estimates (cfun); estimate_numbers_of_iterations (cfun); changed = tree_unroll_loops_completely_1 (may_increase_size, unroll_outer, father_bbs, current_loops->tree_root); if (changed) { unsigned i; unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated); /* We can not use TODO_update_ssa_no_phi because VOPS gets confused. */ if (loop_closed_ssa_invalidated && !bitmap_empty_p (loop_closed_ssa_invalidated)) rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated, TODO_update_ssa); else update_ssa (TODO_update_ssa); /* father_bbs is a bitmap of loop father header BB indices. Translate that to what non-root loops these BBs belong to now. */ bitmap_iterator bi; bitmap fathers = BITMAP_ALLOC (NULL); EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi) { basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i); if (! 
unrolled_loop_bb) continue; if (loop_outer (unrolled_loop_bb->loop_father)) bitmap_set_bit (fathers, unrolled_loop_bb->loop_father->num); } bitmap_clear (father_bbs); /* Propagate the constants within the new basic blocks. */ EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi) { loop_p father = get_loop (cfun, i); basic_block *body = get_loop_body_in_dom_order (father); for (unsigned j = 0; j < father->num_nodes; j++) propagate_constants_for_unrolling (body[j]); free (body); } BITMAP_FREE (fathers); /* This will take care of removing completely unrolled loops from the loop structures so we can continue unrolling now innermost loops. */ if (cleanup_tree_cfg ()) update_ssa (TODO_update_ssa_only_virtuals); /* Clean up the information about numbers of iterations, since complete unrolling might have invalidated it. */ scev_reset (); if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA)) verify_loop_closed_ssa (true); } if (loop_closed_ssa_invalidated) BITMAP_FREE (loop_closed_ssa_invalidated); } while (changed && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS)); BITMAP_FREE (father_bbs); if (irred_invalidated && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)) mark_irreducible_loops (); return 0; } /* Canonical induction variable creation pass. 
*/

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  /* Loop 0 is the function body; nothing to do without real loops.  */
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  /* If we ever decide to run loop peeling more than once, we will need to
     track loops already peeled in loop structures themselves to avoid
     re-peeling the same loop multiple times.
*/
  if (flag_peel_loops)
    peeled_loops = BITMAP_ALLOC (NULL);
  unsigned int val = tree_unroll_loops_completely (flag_unroll_loops
						   || flag_peel_loops
						   || optimize >= 3, true);
  if (peeled_loops)
    {
      BITMAP_FREE (peeled_loops);
      peeled_loops = NULL;
    }
  return val;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  /* Early pass: runs on inner loops only (unroll_outer == false), before
     the main unrolling pass.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}
/* ==== sicm_low.c ==== */
#include "sicm_low.h"

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <numa.h>
#include <numaif.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>

// Older toolchains may lack MAP_HUGE_SHIFT in <sys/mman.h>; fall back to the
// kernel header.  See:
// https://www.mail-archive.com/devel@lists.open-mpi.org/msg20403.html
#ifndef MAP_HUGE_SHIFT
#include <linux/mman.h>
#endif

#include "sicm_impl.h"
#include "detect_devices.h"

#ifdef HIP
#include <hip/hip_runtime.h>
#endif

/* System base page size in KiB; set once by sicm_init from numa_pagesize. */
int normal_page_size = -1;

/* Map a device-tag name string (e.g. from an environment variable) to its
   sicm_device_tag value; returns INVALID_TAG for anything unrecognized.
   NOTE(review): SICM_OPTANE and SICM_HIP are not handled here although
   sicm_device_tag_str can print them -- confirm whether that is intended. */
sicm_device_tag sicm_get_device_tag(char *env) {
  size_t max_chars;
  max_chars = 32;
  if(strncmp(env, "SICM_DRAM", max_chars) == 0) {
    return SICM_DRAM;
  }
  else if(strncmp(env, "SICM_KNL_HBM", max_chars) == 0) {
    return SICM_KNL_HBM;
  }
  else if(strncmp(env, "SICM_POWERPC_HBM", max_chars) == 0) {
    return SICM_POWERPC_HBM;
  }
  return INVALID_TAG;
}

/* Inverse of sicm_get_device_tag: printable name for a tag, or NULL for
   INVALID_TAG.  Returned strings are literals; callers must not free. */
char * sicm_device_tag_str(sicm_device_tag tag) {
  switch(tag) {
    case SICM_DRAM:
      return "SICM_DRAM";
    case SICM_KNL_HBM:
      return "SICM_KNL_HBM";
    case SICM_POWERPC_HBM:
      return "SICM_POWERPC_HBM";
    case SICM_OPTANE:
      return "SICM_OPTANE";
    case SICM_HIP:
      return "SICM_HIP";
    case INVALID_TAG:
      break;
  }
  return NULL;
}

/* qsort comparator over sicm_device** elements: orders by NUMA node, then
   page size, then tag. */
static int sicm_device_compare(const void * lhs, const void * rhs) {
  sicm_device * l = * (sicm_device **) lhs;
  sicm_device * r = * (sicm_device **) rhs;
  if (l->node != r->node) {
    return l->node - r->node;
  }
  if (l->page_size != r->page_size) {
    return l->page_size - r->page_size;
  }
  return l->tag - r->tag;
}

/* Only initialize SICM once.  The count/mutex pair makes sicm_init and
   sicm_fini reference-counted and thread-safe. */
static int sicm_init_count = 0;
static pthread_mutex_t sicm_init_count_mutex = PTHREAD_MUTEX_INITIALIZER;
static sicm_device_list sicm_global_devices = {};
static sicm_device *sicm_global_device_array = NULL;

/* set in sicm_init */
struct sicm_device *sicm_default_device_ptr = NULL;

/* Discover all memory devices (one per NUMA node per supported page size)
   and return the global, sorted device list.  Subsequent calls only bump
   the reference count and return the cached list. */
struct sicm_device_list sicm_init() {
  /* Check whether or not the global devices list has been initialized already */
  pthread_mutex_lock(&sicm_init_count_mutex);
  if (sicm_init_count) {
    sicm_init_count++;
    pthread_mutex_unlock(&sicm_init_count_mutex);
    return sicm_global_devices;
  }

  // Find the number of huge page sizes
  // NOTE(review): opendir may return NULL (e.g. hugepages not configured);
  // readdir(NULL) would then crash -- needs a guard.
  int huge_page_size_count = 0;
  DIR* dir = opendir("/sys/kernel/mm/hugepages");
  struct dirent* entry = NULL;
  while((entry = readdir(dir)) != NULL)
    if(entry->d_name[0] != '.')
      huge_page_size_count++;

  // NOTE(review): malloc results in this function are not checked for NULL.
  int* huge_page_sizes = malloc(huge_page_size_count * sizeof(int));

  normal_page_size = numa_pagesize() / 1024;

  // Find the actual set of huge page sizes (reported in KiB)
  rewinddir(dir);
  int i = 0;
  while((entry = readdir(dir)) != NULL) {
    if(entry->d_name[0] != '.') {
      huge_page_sizes[i] = 0;
      // Entries are named "hugepages-<size>kB"; the digits start at index 10.
      // The scan below rejects (and stops on) any name shorter than 10 chars.
      int j;
      for(j = 0; j < 10; j++) {
        if(entry->d_name[j] == '\0') {
          j = -1;
          break;
        }
      }
      if(j < 0)
        break;
      for(; entry->d_name[j] >= '0' && entry->d_name[j] <= '9'; j++) {
        huge_page_sizes[i] *= 10;
        huge_page_sizes[i] += entry->d_name[j] - '0';
      }
      i++;
    }
  }
  closedir(dir);

  const int node_count = get_node_count();
  // One slot per node for normal pages plus one per huge page size.
  const int device_count = node_count * (huge_page_size_count + 1);

  sicm_global_device_array = malloc(device_count * sizeof(struct sicm_device));

  // initialize the device list
  sicm_device **devices = malloc(device_count * sizeof(sicm_device *));
  for(int i = 0; i < device_count; i++) {
    devices[i] = &sicm_global_device_array[i];
    devices[i]->tag = INVALID_TAG;
    devices[i]->node = -1;
    devices[i]->page_size = -1;
  }

  // idx is the number of slots detect_devices actually filled in.
  const int idx = detect_devices(node_count, huge_page_sizes, huge_page_size_count, normal_page_size, devices);

  free(huge_page_sizes);

  qsort(devices, idx, sizeof(sicm_device *), sicm_device_compare);

  sicm_global_devices = (struct sicm_device_list){ .count = idx, .devices = devices };

  // Default device is the first (lowest node / smallest page size) entry.
  sicm_default_device(0);

  sicm_init_count++;
  pthread_mutex_unlock(&sicm_init_count_mutex);
  return sicm_global_devices;
}

/* Set the process-wide default device to global device IDX (if valid) and
   return the current default.  Call with an out-of-range IDX to query. */
sicm_device *sicm_default_device(const unsigned int idx) {
  if (idx < sicm_global_devices.count) {
    sicm_default_device_ptr = sicm_global_devices.devices[idx];
  }
  return sicm_default_device_ptr;
}

/* Frees memory up.  Drops one reference; the global device list is released
   only when the count reaches zero. */
void sicm_fini() {
  pthread_mutex_lock(&sicm_init_count_mutex);
  if (sicm_init_count) {
    sicm_init_count--;
    if (sicm_init_count == 0) {
      free(sicm_global_devices.devices);
      free(sicm_global_device_array);
      memset(&sicm_global_devices, 0, sizeof(sicm_global_devices));
    }
  }
  pthread_mutex_unlock(&sicm_init_count_mutex);
}

/* Free a caller-owned device list's pointer array (not the devices, which
   are owned by the global array). */
void sicm_device_list_free(sicm_device_list *devs) {
  if (devs == NULL)
    return;
  free(devs->devices);
}

/* First device in DEVS matching TYPE (and PAGE_SIZE if nonzero) that is not
   equal to OLD; NULL if none. */
sicm_device *sicm_find_device(sicm_device_list *devs, const sicm_device_tag type, const int page_size, sicm_device *old) {
  sicm_device *dev = NULL;
  if (devs) {
    unsigned int i;
    for(i = 0; i < devs->count; i++) {
      if ((devs->devices[i]->tag == type) &&
          ((page_size == 0) || (sicm_device_page_size(devs->devices[i]) == page_size)) &&
          !sicm_device_eq(devs->devices[i], old)) {
        dev = devs->devices[i];
        break;
      }
    }
  }
  return dev;
}

/* Allocate SIZE bytes on DEVICE.  Normal pages go through libnuma; huge
   pages bind the memory policy to the device's node and mmap with
   MAP_HUGETLB.  Exits on an unknown tag. */
void* sicm_device_alloc(struct sicm_device* device, size_t size) {
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM: ; // labels can't be followed by declarations
      int page_size = sicm_device_page_size(device);
      if(page_size == normal_page_size)
        return numa_alloc_onnode(size, sicm_numa_id(device));
      else {
        // Compute log2 of the page size in bytes (page_size is in KiB).
        int shift = 10; // i.e., 1024
        int remaining = page_size;
        while(remaining > 1) {
          shift++;
          remaining >>= 1;
        }
        // Temporarily bind the allocation policy to the target node, then
        // restore the previous policy.  NOTE(review): this mutates the
        // thread's mempolicy, so concurrent allocations can interleave.
        int old_mode;
        nodemask_t old_nodemask;
        get_mempolicy(&old_mode, old_nodemask.n, numa_max_node() + 2, NULL, 0);
        nodemask_t nodemask;
        nodemask_zero(&nodemask);
        nodemask_set_compat(&nodemask, sicm_numa_id(device));
        set_mempolicy(MPOL_BIND, nodemask.n, numa_max_node() + 2);
        void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
          MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | (shift << MAP_HUGE_SHIFT), -1, 0);
        if(ptr == MAP_FAILED) {
          printf("huge page allocation error: %s\n", strerror(errno));
        }
        set_mempolicy(old_mode, old_nodemask.n, numa_max_node() + 2);
        // NOTE(review): MAP_FAILED is returned to the caller on failure,
        // not NULL -- callers must check accordingly.
        return ptr;
      }
    case SICM_HIP:
#ifdef HIP
      {
        // record previously selected device
        int old_dev = -1;
        if (hipGetDevice(&old_dev) != hipSuccess) {
          return NULL;
        }

        hipSetDevice(device->data.hip.id);

        void *ptr = NULL;
        hipMalloc(&ptr, size);

        // restore previously selected device
        hipSetDevice(old_dev);

        return ptr;
      }
#endif
      break;
    case INVALID_TAG:
      break;
  }
  printf("error in sicm_alloc: unknown tag\n");
  exit(-1);
}

/* Like sicm_device_alloc, but maps FD at OFFSET (MAP_SHARED) instead of
   anonymous memory for the huge-page case. */
void* sicm_device_alloc_mmapped(struct sicm_device* device, size_t size, int fd, off_t offset) {
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM: ; // labels can't be followed by declarations
      int page_size = sicm_device_page_size(device);
      if(page_size == normal_page_size)
        return numa_alloc_onnode(size, sicm_numa_id(device));
      else {
        int shift = 10; // i.e., 1024
        int remaining = page_size;
        while(remaining > 1) {
          shift++;
          remaining >>= 1;
        }
        // NOTE(review): `shift` is computed but unused in this branch (no
        // MAP_HUGETLB on the file mapping) -- confirm intent.
        int old_mode;
        nodemask_t old_nodemask;
        get_mempolicy(&old_mode, old_nodemask.n, numa_max_node() + 2, NULL, 0);
        nodemask_t nodemask;
        nodemask_zero(&nodemask);
        nodemask_set_compat(&nodemask, sicm_numa_id(device));
        set_mempolicy(MPOL_BIND, nodemask.n, numa_max_node() + 2);
        void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
        if(ptr == MAP_FAILED) {
          printf("huge page allocation error: %s\n", strerror(errno));
        }
        set_mempolicy(old_mode, old_nodemask.n, numa_max_node() + 2);
        return ptr;
      }
    case SICM_HIP:
    case INVALID_TAG:
      break;
  }
  printf("error in sicm_alloc: unknown tag\n");
  exit(-1);
}

/* 1 if DEVICE supports fixed-address (MAP_FIXED) allocation, else 0. */
int sicm_can_place_exact(struct sicm_device* device) {
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM:
      return 1;
    case SICM_HIP:
    case INVALID_TAG:
      break;
  }
  return 0;
}

/* MAP_FIXED allocation of SIZE bytes at BASE on DEVICE.  Note MAP_FIXED
   silently replaces any existing mapping in [BASE, BASE+SIZE). */
void* sicm_alloc_exact(struct sicm_device* device, void* base, size_t size) {
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM: ; // labels can't be followed by declarations
      int page_size = sicm_device_page_size(device);
      if(page_size == normal_page_size) {
        int old_mode;
        nodemask_t old_nodemask;
        get_mempolicy(&old_mode, old_nodemask.n, numa_max_node() + 2, NULL, 0);
        nodemask_t nodemask;
        nodemask_zero(&nodemask);
        nodemask_set_compat(&nodemask, sicm_numa_id(device));
        set_mempolicy(MPOL_BIND, nodemask.n, numa_max_node() + 2);
        void* ptr = mmap(base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if(ptr == (void*)-1) {
          printf("exact allocation error: %s\n", strerror(errno));
        }
        set_mempolicy(old_mode, old_nodemask.n, numa_max_node() + 2);
        return ptr;
      }
      else {
        int shift = 10; // i.e., 1024
        int remaining = page_size;
        while(remaining > 1) {
          shift++;
          remaining >>= 1;
        }
        int old_mode;
        nodemask_t old_nodemask;
        get_mempolicy(&old_mode, old_nodemask.n, numa_max_node() + 2, NULL, 0);
        nodemask_t nodemask;
        nodemask_zero(&nodemask);
        nodemask_set_compat(&nodemask, sicm_numa_id(device));
        set_mempolicy(MPOL_BIND, nodemask.n, numa_max_node() + 2);
        void* ptr = mmap(base, size, PROT_READ | PROT_WRITE,
          MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS | MAP_HUGETLB | (shift << MAP_HUGE_SHIFT), -1, 0);
        // NOTE(review): debug print left in; consider removing or gating.
        printf("alloc exact: %p, %p\n", base, ptr);
        if(ptr == (void*)-1) {
          printf("huge page allocation error: %s\n", strerror(errno));
        }
        set_mempolicy(old_mode, old_nodemask.n, numa_max_node() + 2);
        return ptr;
      }
    case SICM_HIP:
    case INVALID_TAG:
      break;
  }
  printf("error in sicm_alloc_exact: unknown tag\n");
  exit(-1);
}

/* Release memory obtained from sicm_device_alloc / sicm_alloc_exact.
   SIZE must match the original request. */
void sicm_device_free(struct sicm_device* device, void* ptr, size_t size) {
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM:
      if(sicm_device_page_size(device) == normal_page_size)
        //numa_free(ptr, size);
        munmap(ptr, size);
      else {
        // Huge page allocation occurs in whole page chunks, so we need
        // to free (unmap) in whole page chunks.
        int page_size = sicm_device_page_size(device);
        munmap(ptr, sicm_div_ceil(size, page_size * 1024) * page_size * 1024);
      }
      break;
    case SICM_HIP:
#ifdef HIP
      hipFree(ptr);
#endif
      break;
    case INVALID_TAG:
    default:
      printf("error in sicm_device_free: unknown tag\n");
      exit(-1);
  }
}

/* NUMA node backing DEVICE, or -1 for NULL. */
int sicm_numa_id(struct sicm_device* device) {
  return device?device->node:-1;
}

/* Page size of DEVICE in KiB, or -1 for NULL. */
int sicm_device_page_size(struct sicm_device* device) {
  return device?device->page_size:-1;
}

/* Deep equality of two devices (tag, node, page size, and tag-specific
   data).  NULL never equals anything. */
int sicm_device_eq(sicm_device* dev1, sicm_device* dev2) {
  if (!dev1 || !dev2) {
    return 0;
  }
  if (dev1 == dev2) {
    return 1;
  }
  if (dev1->tag != dev2->tag) {
    return 0;
  }
  if (dev1->node != dev2->node) {
    return 0;
  }
  if (dev1->page_size != dev2->page_size) {
    return 0;
  }
  switch(dev1->tag) {
    case SICM_DRAM:
      return 1;
    case SICM_KNL_HBM:
      return (dev1->data.knl_hbm.compute_node == dev2->data.knl_hbm.compute_node);
    case SICM_OPTANE:
      return (dev1->data.optane.compute_node == dev2->data.optane.compute_node);
    case SICM_POWERPC_HBM:
      return 1;
    case SICM_HIP:
      return (dev1->data.hip.id == dev2->data.hip.id);
    case INVALID_TAG:
    default:
      return 0;
  }
  return 0;
}

/* Migrate [PTR, PTR+SIZE) from SRC's node to DST's node via mbind.
   Returns mbind's result, or -1 if either device has no NUMA node. */
int sicm_move(struct sicm_device* src, struct sicm_device* dst, void* ptr, size_t size) {
  if(sicm_numa_id(src) >= 0) {
    int dst_node = sicm_numa_id(dst);
    if(dst_node >= 0) {
      nodemask_t nodemask;
      nodemask_zero(&nodemask);
      nodemask_set_compat(&nodemask, dst_node);
      return mbind(ptr, size, MPOL_BIND, nodemask.n, numa_max_node() + 2, MPOL_MF_MOVE);
    }
  }
  return -1;
}

/* Pin the calling threads to DEVICE's NUMA node.  The omp parallel region
   makes every OpenMP thread in the team run numa_run_on_node. */
int sicm_pin(struct sicm_device* device) {
  int ret = -1;
  switch(device->tag) {
    case SICM_DRAM:
    case SICM_KNL_HBM:
    case SICM_OPTANE:
    case SICM_POWERPC_HBM:
      #pragma omp parallel
      ret = numa_run_on_node(device->node);
      break;
    case SICM_HIP:
    case INVALID_TAG:
      break;
  }
  return ret;
}

/**
 * @input buf - string with meminfo data
 * @input buf_len - length of buffer (buf)
 * @input field - field looking for (e.g., "MemFree")
 * @inout value - output result found in buf input
 *
 * @return: -1 (error), 0 (not found), 1 (found)
 *
 * @Notes:
 *  - Note this 
assumes you do not split meminfo lines up, * or at least the fields you care about are fully contained * in the input buffer (i.e., not split up between reads and * get partial line of input in buf). * - Field names look like "MemTotal" * - Not very pretty, but gets the correct values from meminfo * likely needs some more bounds checking (e.g., buf[i]). */ static int parse_meminfo(char *buf, int buf_len, char *field, size_t *value) { char str[128]; int i; int found = 0; if (0 >= buf_len) { fprintf (stderr, "Error: Bad parameter (bugus buf_len)\n"); return -1; } if ((NULL == buf) || (NULL == field) || (NULL == value)) { fprintf (stderr, "Error: Bad parameter\n"); return -1; } for (i=0; i <= buf_len; i++) { if (buf[i] == field[0]) { char *s1 = &buf[i]; char *s2 = &field[0]; char tmp[128]; int k=0; while (*s1++ == *s2++) { i++; } if (buf[i] == ':') { /* This is our line of info */ /* Move past colon */ i++; /* Move past blank spaces (careful of buf_len) */ while ((i <= buf_len) && (buf[i] == ' ')) { i++; } /* * Grab digits before space and units, e.g., * Node 0 MemFree: 6348756 kB */ while ((i <= buf_len) && (buf[i] != ' ')) { tmp[k] = buf[i]; k++; i++; } tmp[k] = '\0'; *value = strtol(tmp, NULL, 0); /* Found, all done. 
*/ found = 1; break; } /* NOT our match, keep looking*/ } } return found; } size_t sicm_capacity(struct sicm_device* device) { static const size_t path_len = 100; char path[path_len]; int i; switch(device->tag) { case SICM_DRAM: case SICM_KNL_HBM: case SICM_OPTANE: case SICM_POWERPC_HBM:; int node = sicm_numa_id(device); int page_size = sicm_device_page_size(device); if(page_size == normal_page_size) { snprintf(path, path_len, "/sys/devices/system/node/node%d/meminfo", node); int fd = open(path, O_RDONLY); #if 0 char data[31]; if (read(fd, data, 31) != 31) { close(fd); return -1; } close(fd); size_t res = 0; size_t factor = 1; for(i = 30; data[i] != ' '; i--) { res += factor * (data[i] - '0'); factor *= 10; } return res; #else char data[128]; if (read(fd, data, 128) != 128) { close(fd); return -1; } close(fd); size_t res = 0; int rc = 0; /* TODO: More testing */ rc = parse_meminfo(data, 128, "MemTotal", &res); if (rc <= 0) { fprintf(stderr, "Error: failed to get available memory for node %d\n", node); return -1; } return res; #endif } else { snprintf(path, path_len, "/sys/devices/system/node/node%d/hugepages/hugepages-%dkB/nr_hugepages", node, page_size); int fd = open(path, O_RDONLY); int pages = 0; char data[10]; while(read(fd, data, 10) > 0) { for(i = 0; i < 10; i++) { if(data[i] < '0' || data[i] > '9') break; pages *= 10; pages += data[i] - '0'; } } close(fd); return pages * page_size; } case INVALID_TAG: default: return -1; } } size_t sicm_avail(struct sicm_device* device) { static const size_t path_len = 100; char path[path_len]; int i; switch(device->tag) { case SICM_DRAM: case SICM_KNL_HBM: case SICM_OPTANE: case SICM_POWERPC_HBM:; int node = sicm_numa_id(device); int page_size = sicm_device_page_size(device); if(page_size == normal_page_size) { snprintf(path, path_len, "/sys/devices/system/node/node%d/meminfo", node); int fd = open(path, O_RDONLY); #if 0 char data[66]; if (read(fd, data, 66) != 66) { close(fd); return -1; } close(fd); size_t res = 0; 
size_t factor = 1; for(i = 65; data[i] != ' '; i--) { res += factor * (data[i] - '0'); factor *= 10; } #else char data[128]; if (read(fd, data, 128) != 128) { close(fd); return -1; } close(fd); size_t res = 0; int rc = 0; /* TODO: More testing */ rc = parse_meminfo(data, 128, "MemFree", &res); if (rc <= 0) { fprintf(stderr, "Error: failed to get available memory for node %d\n", node); return -1; } #endif return res; } else { snprintf(path, path_len, "/sys/devices/system/node/node%d/hugepages/hugepages-%dkB/free_hugepages", node, page_size); int fd = open(path, O_RDONLY); int pages = 0; char data[10]; while(read(fd, data, 10) > 0) { for(i = 0; i < 10; i++) { if(data[i] < '0' || data[i] > '9') break; pages *= 10; pages += data[i] - '0'; } } close(fd); return pages * page_size; } case INVALID_TAG: default: return -1; } } int sicm_model_distance(struct sicm_device* device) { switch(device->tag) { case SICM_DRAM: case SICM_KNL_HBM: case SICM_OPTANE: case SICM_POWERPC_HBM:; int node = sicm_numa_id(device); return numa_distance(node, numa_node_of_cpu(sched_getcpu())); case INVALID_TAG: default: return -1; } } int sicm_is_near(struct sicm_device* device) { int dist; dist = numa_distance(sicm_numa_id(device), numa_node_of_cpu(sched_getcpu())); switch(device->tag) { case SICM_DRAM: return dist == 10; case SICM_KNL_HBM: return dist == 31; case SICM_OPTANE: return dist == 17; case SICM_POWERPC_HBM: return dist == 80; case INVALID_TAG: default: return 0; } } void sicm_latency(struct sicm_device* device, size_t size, int iter, struct sicm_timing* res) { struct timespec start, end; int i; char b = 0; unsigned int n = time(NULL); clock_gettime(CLOCK_MONOTONIC_RAW, &start); char* blob = sicm_device_alloc(device, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); res->alloc = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; clock_gettime(CLOCK_MONOTONIC_RAW, &start); for(i = 0; i < iter; i++) { sicm_rand(n); blob[n % size] = 0; } 
clock_gettime(CLOCK_MONOTONIC_RAW, &end); res->write = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; clock_gettime(CLOCK_MONOTONIC_RAW, &start); for(i = 0; i < iter; i++) { sicm_rand(n); b = blob[n % size]; } clock_gettime(CLOCK_MONOTONIC_RAW, &end); // Write it back so hopefully it won't compile away the read blob[0] = b; res->read = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; clock_gettime(CLOCK_MONOTONIC_RAW, &start); sicm_device_free(device, blob, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); res->free = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; } size_t sicm_bandwidth_linear2(struct sicm_device* device, size_t size, size_t (*kernel)(double*, double*, size_t)) { struct timespec start, end; double* a = sicm_device_alloc(device, size * sizeof(double)); double* b = sicm_device_alloc(device, size * sizeof(double)); unsigned int i; #pragma omp parallel for for(i = 0; i < size; i++) { a[i] = 1; b[i] = 2; } clock_gettime(CLOCK_MONOTONIC_RAW, &start); size_t accesses = kernel(a, b, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); size_t delta = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; sicm_device_free(device, a, size * sizeof(double)); sicm_device_free(device, b, size * sizeof(double)); return accesses / delta; } size_t sicm_bandwidth_random2(struct sicm_device* device, size_t size, size_t (*kernel)(double*, double*, size_t*, size_t)) { struct timespec start, end; double* a = sicm_device_alloc(device, size * sizeof(double)); double* b = sicm_device_alloc(device, size * sizeof(double)); size_t* indexes = sicm_device_alloc(device, size * sizeof(size_t)); unsigned int i; #pragma omp parallel for for(i = 0; i < size; i++) { a[i] = 1; b[i] = 2; indexes[i] = sicm_hash(i) % size; } clock_gettime(CLOCK_MONOTONIC_RAW, &start); size_t accesses = kernel(a, b, indexes, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); size_t delta = 
(end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; sicm_device_free(device, a, size * sizeof(double)); sicm_device_free(device, b, size * sizeof(double)); sicm_device_free(device, indexes, size * sizeof(size_t)); return accesses / delta; } size_t sicm_bandwidth_linear3(struct sicm_device* device, size_t size, size_t (*kernel)(double*, double*, double*, size_t)) { struct timespec start, end; double* a = sicm_device_alloc(device, 3 * size * sizeof(double)); double* b = &a[size]; double* c = &a[size * 2]; unsigned int i; #pragma omp parallel for for(i = 0; i < size; i++) { a[i] = 1; b[i] = 2; c[i] = 3; } clock_gettime(CLOCK_MONOTONIC_RAW, &start); size_t accesses = kernel(a, b, c, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); size_t delta = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; sicm_device_free(device, a, 3 * size * sizeof(double)); return accesses / delta; } size_t sicm_bandwidth_random3(struct sicm_device* device, size_t size, size_t (*kernel)(double*, double*, double*, size_t*, size_t)) { struct timespec start, end; double* a = sicm_device_alloc(device, size * sizeof(double)); double* b = sicm_device_alloc(device, size * sizeof(double)); double* c = sicm_device_alloc(device, size * sizeof(double)); size_t* indexes = sicm_device_alloc(device, size * sizeof(size_t)); unsigned int i; #pragma omp parallel for for(i = 0; i < size; i++) { a[i] = 1; b[i] = 2; c[i] = 3; indexes[i] = sicm_hash(i) % size; } clock_gettime(CLOCK_MONOTONIC_RAW, &start); size_t accesses = kernel(a, b, c, indexes, size); clock_gettime(CLOCK_MONOTONIC_RAW, &end); size_t delta = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; sicm_device_free(device, a, size * sizeof(double)); sicm_device_free(device, b, size * sizeof(double)); sicm_device_free(device, c, size * sizeof(double)); sicm_device_free(device, indexes, size * sizeof(size_t)); return accesses / delta; } size_t 
sicm_triad_kernel_linear(double* a, double* b, double* c, size_t size) { int i; double scalar = 3.0; #pragma omp parallel for for(i = 0; i < size; i++) { a[i] = b[i] + scalar * c[i]; } return size * 3 * sizeof(double); } size_t sicm_triad_kernel_random(double* a, double* b, double* c, size_t* indexes, size_t size) { int i, idx; double scalar = 3.0; #pragma omp parallel for for(i = 0; i < size; i++) { idx = indexes[i]; a[idx] = b[idx] + scalar * c[idx]; } return size * (sizeof(size_t) + 3 * sizeof(double)); }
/* ==== 2Dpfold.c ==== */
/* * minimum free energy * RNA secondary structure with * basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2 * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include <float.h> /* #defines FLT_MAX ... */ #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/2Dpfold.h" /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void crosslink(TwoDpfold_vars *vars); PRIVATE void pf2D_linear(vrna_fold_compound_t *vc); PRIVATE void pf2D_circ(vrna_fold_compound_t *vc); PRIVATE char *pbacktrack_circ(vrna_fold_compound_t *vc, int d1, int d2); PRIVATE void backtrack(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm1(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm2(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int k); PRIVATE void backtrack_qcH(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcI(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcM(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real); INLINE PRIVATE void preparePosteriorBoundaries(int size, int 
shift, int *min_k, int *max_k, int **min_l, int **max_l);

/* grow the posterior (k,l) bounding box so it also covers the freshly written entry (d1,d2) */
INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l);

/* derive the initial (k,l) bounds of a DP cell from its a-priori limits and the basepair distance of the two references */
INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l);

/* allocate the banded 2D (k,l) storage of a DP cell for the given bounds */
INLINE PRIVATE void prepareArray(FLT_OR_DBL ***array, int min_k, int max_k, int *min_l, int *max_l);

/*
 #################################
 # BEGIN OF FUNCTION DEFINITIONS #
 #################################
 */

/**
 * Compute the 2D partition function for @p vc, distance-classified against
 * the two reference structures stored in the fold compound.
 *
 * @param vc         fold compound holding sequence, references, and exp. matrices
 * @param distance1  user cap on basepair distance to reference 1 (negative = no cap)
 * @param distance2  user cap on basepair distance to reference 2 (negative = no cap)
 * @return newly allocated list of (k, l, q) solutions; a (k = l = -1) entry
 *         carries the "remaining" partition outside the distance window, and a
 *         (k = l = INF) entry terminates the list. Caller frees the result.
 */
PUBLIC vrna_sol_TwoD_pf_t *
vrna_pf_TwoD(vrna_fold_compound_t *vc, int distance1, int distance2)
{
  unsigned int maxD1 = 0, maxD2 = 0, counter = 0;
  int cnt1, cnt2, k_min, k_max, l_min, l_max, ndx;
  FLT_OR_DBL q = 0.;
  vrna_sol_TwoD_pf_t *output;
  vrna_md_t *md;
  vrna_mx_pf_t *matrices;

  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  matrices = vc->exp_matrices;
  md = &(vc->exp_params->model_details);

  /* user-supplied caps may only tighten the precomputed maxima; larger requests are ignored with a warning */
  if (distance1 >= 0) {
    if ((unsigned int)distance1 > maxD1)
      vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 1 to %u\n", maxD1);
    else
      maxD1 = (unsigned int)distance1;
  }

  if (distance2 >= 0) {
    if ((unsigned int)distance2 > maxD2)
      vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 2 to %u\n", maxD2);
    else
      maxD2 = (unsigned int)distance2;
  }

  vc->maxD1 = maxD1;
  vc->maxD2 = maxD2;

  /* worst-case number of (k,l) classes (l shares k's parity, hence /2) plus
   * room for the "remaining" entry and the end marker */
  output = (vrna_sol_TwoD_pf_t *)vrna_alloc((((maxD1 + 1) * (maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_pf_t));

  pf2D_linear(vc);
  if (md->circ)
    pf2D_circ(vc);

  /* index of the full-sequence cell [1..n] in the triangular matrices */
  ndx = vc->iindx[1] - vc->length;

  /* circular folds read the dedicated Q_c arrays, linear folds the [1,n] cell of Q */
  k_min = (md->circ) ? matrices->k_min_Q_c : matrices->k_min_Q[ndx];
  k_max = (md->circ) ? matrices->k_max_Q_c : matrices->k_max_Q[ndx];

  for (cnt1 = k_min; cnt1 <= k_max; cnt1++) {
    l_min = (md->circ) ? matrices->l_min_Q_c[cnt1] : matrices->l_min_Q[ndx][cnt1];
    l_max = (md->circ) ? matrices->l_max_Q_c[cnt1] : matrices->l_max_Q[ndx][cnt1];
    /* l advances by 2: only distances of equal parity are populated, storage is at cnt2/2 */
    for (cnt2 = l_min; cnt2 <= l_max; cnt2 += 2) {
      q = (md->circ) ? matrices->Q_c[cnt1][cnt2 / 2] : matrices->Q[ndx][cnt1][cnt2 / 2];
      if (q == 0.)
        continue;

      output[counter].k = cnt1;
      output[counter].l = cnt2;
      output[counter].q = q;
      counter++;
    }
  }

  /* store entry for remaining partition if it exists (distance classes beyond maxD1/maxD2) */
  q = (md->circ) ? matrices->Q_c_rem : matrices->Q_rem[ndx];
  if (q != 0.) {
    output[counter].k = -1;
    output[counter].l = -1;
    output[counter].q = q;
    counter++;
  }

  /* insert end-marker entry */
  output[counter].k = output[counter].l = INF;
  counter++;

  /* resize to actual dataset amount */
  output = (vrna_sol_TwoD_pf_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_pf_t) * counter);
  return output;
}

#if 0
/* NOTE(review): legacy interface, compiled out; superseded by vrna_pf_TwoD() above.
 * Unlike the live code, this version unconditionally overwrites maxD1/maxD2 with
 * the user request even when it exceeds the computed maximum. */
PUBLIC FLT_OR_DBL **
TwoDpfold(TwoDpfold_vars *vars, int distance1, int distance2)
{
  unsigned int i;
  unsigned int maxD1 = 0;
  unsigned int maxD2 = 0;
  unsigned int mm;
  int cnt1, cnt2;
  FLT_OR_DBL **output;

  initialize_TwoDpfold_vars(vars);
  vars->S = encode_sequence(vars->sequence, 0);
  vars->S1 = encode_sequence(vars->sequence, 1);
  make_ptypes2(vars);

  /* count basepairs in each reference structure (pair tables) */
  for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++)
    if (i < (unsigned int)vars->reference_pt1[i])
      maxD1++;

  for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++)
    if (i < (unsigned int)vars->reference_pt2[i])
      maxD2++;

  mm = maximumMatching(vars->sequence);
  maxD1 += mm;
  maxD2 += mm;

  if (distance1 >= 0) {
    if ((unsigned int)distance1 > maxD1)
      fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1);

    maxD1 = (unsigned int)distance1;
  }

  if (distance2 >= 0) {
    if ((unsigned int)distance2 > maxD2)
      fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2);

    maxD2 = (unsigned int)distance2;
  }

  vars->maxD1 = maxD1;
  vars->maxD2 = maxD2;
  output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1));

  pf2D_linear(vars);

  int ndx = vars->my_iindx[1] - vars->seq_length;
  for (cnt1 = vars->k_min_values[ndx]; cnt1 <= MIN2(vars->k_max_values[ndx], vars->maxD1); cnt1++) {
    output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL));
    for (cnt2 =
vars->l_min_values[ndx][cnt1]; cnt2 <= MIN2(vars->l_max_values[ndx][cnt1], vars->maxD2); cnt2 += 2) output[cnt1][cnt2] = vars->Q[ndx][cnt1][cnt2 / 2]; } return output; } PUBLIC FLT_OR_DBL ** TwoDpfold_circ(TwoDpfold_vars *vars, int distance1, int distance2) { unsigned int i; unsigned int maxD1 = 0; unsigned int maxD2 = 0; unsigned int mm; int cnt1, cnt2; FLT_OR_DBL **output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++) if (i < (unsigned int)vars->reference_pt1[i]) maxD1++; for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++) if (i < (unsigned int)vars->reference_pt2[i]) maxD2++; mm = maximumMatching(vars->sequence); maxD1 += mm; maxD2 += mm; if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1)); pf2D_linear(vars); pf2D_circ(vars); for (cnt1 = vars->k_min_values_qc; cnt1 <= MIN2(vars->k_max_values_qc, vars->maxD1); cnt1++) { output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL)); for (cnt2 = vars->l_min_values_qc[cnt1]; cnt2 <= MIN2(vars->l_max_values_qc[cnt1], vars->maxD2); cnt2 += 2) output[cnt1][cnt2] = vars->Q_c[cnt1][cnt2 / 2]; } return output; } #endif PRIVATE void pf2D_linear(vrna_fold_compound_t *vc) { char *sequence, *ptype; short *S1, *reference_pt1, *reference_pt2; unsigned int *referenceBPs1, *referenceBPs2, d, i, j, ij, seq_length, maxD1, maxD2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, circ, cnt1, cnt2, cnt3, cnt4, *rtype; double max_real; FLT_OR_DBL *scale, Qmax; 
vrna_exp_param_t *pf_params; vrna_mx_pf_t *matrices; vrna_md_t *md; max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); scale = matrices->scale; reference_pt1 = vc->reference_pt1; reference_pt2 = vc->reference_pt2; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; circ = md->circ; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; Qmax = 0.; /*array initialization ; qb,qm,q * qb,qm,q (i,j) are stored as ((n+1-i)*(n-i) div 2 + n+1-j */ for (j = 1; j <= seq_length; j++) for (i = (j > TURN ? (j - TURN) : 1); i <= j; i++) { ij = my_iindx[i] - j; matrices->k_min_Q[ij] = 0; matrices->k_max_Q[ij] = 0; matrices->l_min_Q[ij] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_Q[ij] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_Q[ij][0] = 0; matrices->l_max_Q[ij][0] = 0; matrices->Q[ij] = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *)); matrices->Q[ij][0] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL)); matrices->Q[ij][0][0] = 1.0 * scale[j - i + 1]; } for (d = TURN + 2; d <= seq_length; d++) { /* i,j in [1..seq_length] */ #ifdef _OPENMP #pragma omp parallel for private(i, j, ij, cnt1, cnt2, cnt3, cnt4) #endif for (j = d; j <= seq_length; j++) { unsigned int k, l, kl, u, ii, dij; int no_close, type, type_2, tt, da, db, base_da, base_db; FLT_OR_DBL temp2, aux_en; i = j - d + 1; ij = my_iindx[i] - j; dij = j - i - 1; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); if (type) { /* we have a pair */ int k_min_Q_B, k_max_Q_B, l_min_Q_B, l_max_Q_B; int k_min_post_b, k_max_post_b, *l_min_post_b, *l_max_post_b; int update_b = 0; if (!matrices->Q_B[ij]) { update_b = 1; k_min_Q_B = l_min_Q_B = 0; 
k_max_Q_B = mm1[ij] + referenceBPs1[ij]; l_max_Q_B = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_B, k_max_Q_B, l_min_Q_B, l_max_Q_B, bpdist[ij], &matrices->k_min_Q_B[ij], &matrices->k_max_Q_B[ij], &matrices->l_min_Q_B[ij], &matrices->l_max_Q_B[ij] ); preparePosteriorBoundaries(matrices->k_max_Q_B[ij] - matrices->k_min_Q_B[ij] + 1, matrices->k_min_Q_B[ij], &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); prepareArray(&matrices->Q_B[ij], matrices->k_min_Q_B[ij], matrices->k_max_Q_B[ij], matrices->l_min_Q_B[ij], matrices->l_max_Q_B[ij] ); } /* hairpin ----------------------------------------------*/ /* get distance to reference if closing the hairpin * d1a = dbp(T1_{i,j}, {i,j}) */ base_da = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_db = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_da + referenceBPs1[ij]; db = base_db + referenceBPs2[ij]; if (!no_close) { if ((da >= 0) && (db >= 0)) { if (((unsigned int)da <= maxD1) && ((unsigned int)db <= maxD2)) { matrices->Q_B[ij][da][db / 2] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2]; if (update_b) { updatePosteriorBoundaries(da, db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2]; } } } /*-------------------------------------------------------- * check for elementary structures involving more than one * closing pair. 
* --------------------------------------------------------*/ for (k = i + 1; k <= MIN2(j - 2 - TURN, i + MAXLOOP + 1); k++) { unsigned int minl, ln_pre; minl = k + TURN + 1; ln_pre = dij + k; if (ln_pre > minl + MAXLOOP) minl = ln_pre - MAXLOOP - 1; for (l = minl; l < j; l++) { kl = my_iindx[k] - l; type_2 = ptype[jindx[l] + k]; if (type_2 == 0) continue; type_2 = rtype[type_2]; aux_en = exp_E_IntLoop(k - i - 1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[k - i + j - l]; /* get distance to reference if closing the interior loop * d2 = dbp(S_{i,j}, S_{k,l} + {i,j}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[kl]; db = base_db + referenceBPs2[ij] - referenceBPs2[kl]; if (matrices->Q_B_rem[kl]) matrices->Q_B_rem[ij] += matrices->Q_B_rem[kl] * aux_en; if (!matrices->Q_B[kl]) continue; for (cnt1 = matrices->k_min_Q_B[kl]; cnt1 <= matrices->k_max_Q_B[kl]; cnt1++) for (cnt2 = matrices->l_min_Q_B[kl][cnt1]; cnt2 <= matrices->l_max_Q_B[kl][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_B[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en; if (update_b) { updatePosteriorBoundaries(da + cnt1, db + cnt2, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en; } } } /* end l-loop */ } /* end k-loop */ /* multi-loop contribution ------------------------*/ if (!no_close) { for (u = i + TURN + 2; u < j - TURN - 2; u++) { tt = rtype[type]; temp2 = pf_params->expMLclosing * exp_E_MLstem(tt, S1[j - 1], S1[i + 1], pf_params) * scale[2]; if (matrices->Q_M_rem[my_iindx[i + 1] - u]) { if (matrices->Q_M1[jindx[j - 1] + u + 1]) { for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1]; cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt1]; cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt1]; cnt2 += 2) 
matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt1][cnt2 / 2] * temp2; } if (matrices->Q_M1_rem[jindx[j - 1] + u + 1]) matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2; } if (matrices->Q_M1_rem[jindx[j - 1] + u + 1]) { if (matrices->Q_M[my_iindx[i + 1] - u]) { for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u]; cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u]; cnt1++) for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 += 2) matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2; } } /* get distance to reference if closing the multiloop * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - u] - referenceBPs1[my_iindx[u + 1] - j + 1]; db = base_db + referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - u] - referenceBPs2[my_iindx[u + 1] - j + 1]; if (!matrices->Q_M[my_iindx[i + 1] - u]) continue; if (!matrices->Q_M1[jindx[j - 1] + u + 1]) continue; for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u]; cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u]; cnt1++) for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 += 2) { for (cnt3 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1]; cnt3 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1]; cnt3++) for (cnt4 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt3]; cnt4 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_B[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2] * temp2; if (update_b) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 
+ cnt4 + db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2] * temp2; } } } } } if (update_b) { adjustArrayBoundaries(&matrices->Q_B[ij], &matrices->k_min_Q_B[ij], &matrices->k_max_Q_B[ij], &matrices->l_min_Q_B[ij], &matrices->l_max_Q_B[ij], k_min_post_b, k_max_post_b, l_min_post_b, l_max_post_b ); } } /* end >> if (pair) << */ /* free ends ? -----------------------------------------*/ int k_min_Q_M, k_max_Q_M, l_min_Q_M, l_max_Q_M; int k_min_post_m, k_max_post_m, *l_min_post_m, *l_max_post_m; int update_m = 0; int k_min_Q_M1, k_max_Q_M1, l_min_Q_M1, l_max_Q_M1; int k_min_post_m1, k_max_post_m1, *l_min_post_m1, *l_max_post_m1; int update_m1 = 0; if (!matrices->Q_M[ij]) { update_m = 1; k_min_Q_M = l_min_Q_M = 0; k_max_Q_M = mm1[ij] + referenceBPs1[ij]; l_max_Q_M = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_M, k_max_Q_M, l_min_Q_M, l_max_Q_M, bpdist[ij], &matrices->k_min_Q_M[ij], &matrices->k_max_Q_M[ij], &matrices->l_min_Q_M[ij], &matrices->l_max_Q_M[ij] ); preparePosteriorBoundaries(matrices->k_max_Q_M[ij] - matrices->k_min_Q_M[ij] + 1, matrices->k_min_Q_M[ij], &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); prepareArray(&matrices->Q_M[ij], matrices->k_min_Q_M[ij], matrices->k_max_Q_M[ij], matrices->l_min_Q_M[ij], matrices->l_max_Q_M[ij] ); } if (!matrices->Q_M1[jindx[j] + i]) { update_m1 = 1; k_min_Q_M1 = l_min_Q_M1 = 0; k_max_Q_M1 = mm1[ij] + referenceBPs1[ij]; l_max_Q_M1 = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_M1, k_max_Q_M1, l_min_Q_M1, l_max_Q_M1, bpdist[ij], &matrices->k_min_Q_M1[jindx[j] + i], &matrices->k_max_Q_M1[jindx[j] + i], &matrices->l_min_Q_M1[jindx[j] + i], &matrices->l_max_Q_M1[jindx[j] + i] ); preparePosteriorBoundaries(matrices->k_max_Q_M1[jindx[j] + i] - matrices->k_min_Q_M1[jindx[j] + i] + 1, matrices->k_min_Q_M1[jindx[j] + i], &k_min_post_m1, 
&k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); prepareArray(&matrices->Q_M1[jindx[j] + i], matrices->k_min_Q_M1[jindx[j] + i], matrices->k_max_Q_M1[jindx[j] + i], matrices->l_min_Q_M1[jindx[j] + i], matrices->l_max_Q_M1[jindx[j] + i] ); } /* j is unpaired */ da = referenceBPs1[ij] - referenceBPs1[ij + 1]; db = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (matrices->Q_M_rem[ij + 1]) matrices->Q_M_rem[ij] += matrices->Q_M_rem[ij + 1] * pf_params->expMLbase * scale[1]; if (matrices->Q_M[ij + 1]) { for (cnt1 = matrices->k_min_Q_M[ij + 1]; cnt1 <= matrices->k_max_Q_M[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_Q_M[ij + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ij + 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; if (update_m) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; } } } } if (matrices->Q_M1_rem[jindx[j - 1] + i]) matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1_rem[jindx[j - 1] + i] * pf_params->expMLbase * scale[1]; if (matrices->Q_M1[jindx[j - 1] + i]) { for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + i]; cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + i]; cnt1++) for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + i][cnt1]; cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + i][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M1[jindx[j] + i][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; if (update_m1) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } else { matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * 
scale[1]; } } } /* j pairs with i */ if ((!no_close) && type) { FLT_OR_DBL aux_en = exp_E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[ij]) { matrices->Q_M_rem[ij] += matrices->Q_B_rem[ij] * aux_en; matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_B_rem[ij] * aux_en; } if (matrices->Q_B[ij]) { for (cnt1 = matrices->k_min_Q_B[ij]; cnt1 <= matrices->k_max_Q_B[ij]; cnt1++) for (cnt2 = matrices->l_min_Q_B[ij][cnt1]; cnt2 <= matrices->l_max_Q_B[ij][cnt1]; cnt2 += 2) { matrices->Q_M[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_m) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } matrices->Q_M1[jindx[j] + i][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_m1) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } } } /* j pairs with k: i<k<j */ ii = my_iindx[i]; for (k = i + 1; k <= j; k++) { tt = ptype[jindx[j] + k]; temp2 = exp_E_MLstem(tt, S1[k - 1], (j < seq_length) || circ ? 
S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[my_iindx[k] - j]) { matrices->Q_M_rem[ij] += matrices->Q_B_rem[my_iindx[k] - j] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; if (matrices->Q_M[ii - k + 1]) { for (cnt1 = matrices->k_min_Q_M[ii - k + 1]; cnt1 <= matrices->k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_M_rem[ii - k + 1]) matrices->Q_M_rem[ij] += matrices->Q_M_rem[ii - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_M_rem[ii - k + 1]) { if (matrices->Q_B[my_iindx[k] - j]) { for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) matrices->Q_M_rem[ij] += matrices->Q_M_rem[my_iindx[k] - j] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2; } } /* add contributions of QM(i,k-1)*QB(k,j)*e^b and * e^((k-i) * c) * QB(k,j) * e^b * therefor we need d1a = dbp(T1_{i,j}, T1_{i,k-1} + T1_{k,j}), * d1b = dbp(T2_{i,j}, T2_{i,k-1} + T2_{k,j}) * d1c = dbp(T1_{i,j}, T1_{k,j})circ = 0; * d1d = dbp(T2_{i,j}, T2_{k,j}) */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j]; if (!matrices->Q_B[my_iindx[k] - j]) continue; for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; if (update_m) { 
updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; } } if (!matrices->Q_M[ii - k + 1]) continue; da -= referenceBPs1[ii - k + 1]; db -= referenceBPs2[ii - k + 1]; for (cnt1 = matrices->k_min_Q_M[ii - k + 1]; cnt1 <= matrices->k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt3++) for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3]; cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; if (update_m) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; } } } if (update_m) { adjustArrayBoundaries(&matrices->Q_M[ij], &matrices->k_min_Q_M[ij], &matrices->k_max_Q_M[ij], &matrices->l_min_Q_M[ij], &matrices->l_max_Q_M[ij], k_min_post_m, k_max_post_m, l_min_post_m, l_max_post_m ); } if (update_m1) { adjustArrayBoundaries(&matrices->Q_M1[jindx[j] + i], &matrices->k_min_Q_M1[jindx[j] + i], &matrices->k_max_Q_M1[jindx[j] + i], &matrices->l_min_Q_M1[jindx[j] + i], &matrices->l_max_Q_M1[jindx[j] + i], k_min_post_m1, k_max_post_m1, l_min_post_m1, l_max_post_m1 ); } /* compute contributions for Q(i,j) */ int k_min, k_max, l_min, l_max; int k_min_post, k_max_post, *l_min_post, *l_max_post; int update_q = 0; if (!matrices->Q[ij]) { update_q = 
1; k_min = l_min = 0; k_max = mm1[ij] + referenceBPs1[ij]; l_max = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min, k_max, l_min, l_max, bpdist[ij], &matrices->k_min_Q[ij], &matrices->k_max_Q[ij], &matrices->l_min_Q[ij], &matrices->l_max_Q[ij] ); preparePosteriorBoundaries(matrices->k_max_Q[ij] - matrices->k_min_Q[ij] + 1, matrices->k_min_Q[ij], &k_min_post, &k_max_post, &l_min_post, &l_max_post ); prepareArray(&matrices->Q[ij], matrices->k_min_Q[ij], matrices->k_max_Q[ij], matrices->l_min_Q[ij], matrices->l_max_Q[ij] ); } if (type) { aux_en = exp_E_ExtLoop(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[ij]) matrices->Q_rem[ij] += matrices->Q_B_rem[ij] * aux_en; if (matrices->Q_B[ij]) { for (cnt1 = matrices->k_min_Q_B[ij]; cnt1 <= matrices->k_max_Q_B[ij]; cnt1++) for (cnt2 = matrices->l_min_Q_B[ij][cnt1]; cnt2 <= matrices->l_max_Q_B[ij][cnt1]; cnt2 += 2) { matrices->Q[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_q) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } } } /* j is unpaired */ if (matrices->Q_rem[ij + 1]) matrices->Q_rem[ij] += matrices->Q_rem[ij + 1] * scale[1]; /* da = dbp(T1_{i,j}, T1_{i,j-1}) * db = dbp(T2_{i,j}, T2_{i,j-1}) */ da = referenceBPs1[ij] - referenceBPs1[ij + 1]; db = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (matrices->Q[ij + 1]) { for (cnt1 = matrices->k_min_Q[ij + 1]; cnt1 <= matrices->k_max_Q[ij + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[ij + 1][cnt1]; cnt2 <= matrices->l_max_Q[ij + 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1]; if (update_q) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else { matrices->Q_rem[ij] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1]; } } } for (k = j - TURN - 
1; k > i; k--) { tt = ptype[jindx[j] + k]; temp2 = exp_E_ExtLoop(tt, S1[k - 1], (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_rem[my_iindx[i] - k + 1]) { if (matrices->Q_B[my_iindx[k] - j]) { for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2; } if (matrices->Q_B_rem[my_iindx[k] - j]) matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_B_rem[my_iindx[k] - j]) { if (matrices->Q[my_iindx[i] - k + 1]) { for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1]; cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1]; cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } } /* da = dbp{T1_{i,j}, T1_{k,j} * db = dbp{T2_{i,j}, T2_{k,j}} */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j] - referenceBPs1[my_iindx[i] - k + 1]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j] - referenceBPs2[my_iindx[i] - k + 1]; if (!matrices->Q[my_iindx[i] - k + 1]) continue; if (!matrices->Q_B[my_iindx[k] - j]) continue; for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1]; cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1]; cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt3++) for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3]; cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 
+ db) <= maxD2)) { matrices->Q[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; if (update_q) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else { matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; } } } if (update_q) { adjustArrayBoundaries(&matrices->Q[ij], &matrices->k_min_Q[ij], &matrices->k_max_Q[ij], &matrices->l_min_Q[ij], &matrices->l_max_Q[ij], k_min_post, k_max_post, l_min_post, l_max_post ); } #if 1 for (cnt1 = matrices->k_min_Q[ij]; cnt1 <= matrices->k_max_Q[ij]; cnt1++) { for (cnt2 = matrices->l_min_Q[ij][cnt1]; cnt2 <= matrices->l_max_Q[ij][cnt1]; cnt2 += 2) { if (matrices->Q[ij][cnt1][cnt2 / 2] > Qmax) { Qmax = matrices->Q[ij][cnt1][cnt2 / 2]; if (Qmax > max_real / 10.) vrna_message_warning("Q close to overflow: %u %u %g\n", i, j, matrices->Q[ij][cnt1][cnt2 / 2]); } if (matrices->Q[ij][cnt1][cnt2 / 2] >= max_real) vrna_message_error("overflow in pf_fold while calculating q[%u,%u]\n" "use larger pf_scale", i, j); } } #endif } /* end of j-loop */ } } /* calculate partition function for circular case */ /* NOTE: this is the postprocessing step ONLY */ /* You have to call pf2D_linear first to calculate */ /* complete circular case!!! 
*/ PRIVATE void pf2D_circ(vrna_fold_compound_t *vc) { unsigned int d, p, q, pq, k, l, kl, u, da, db, seq_length, maxD1, maxD2, base_d1, base_d2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, type, cnt1, cnt2, cnt3, cnt4, *rtype; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *sequence, *ptype; FLT_OR_DBL *scale; vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */ vrna_md_t *md; vrna_mx_pf_t *matrices; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); scale = matrices->scale; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1; FLT_OR_DBL *Q_B_rem, *Q_M_rem, *Q_M1_rem; int **l_min_Q_B, **l_max_Q_B, **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1; int *k_min_Q_B, *k_max_Q_B, *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1; Q_B = matrices->Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; Q_M = matrices->Q_M; l_min_Q_M = matrices->l_min_Q_M; l_max_Q_M = matrices->l_max_Q_M; k_min_Q_M = matrices->k_min_Q_M; k_max_Q_M = matrices->k_max_Q_M; Q_M1 = matrices->Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; Q_B_rem = matrices->Q_B_rem; Q_M_rem = matrices->Q_M_rem; Q_M1_rem = matrices->Q_M1_rem; matrices->Q_c_rem = 0.; matrices->Q_cH_rem = 0.; matrices->Q_cI_rem = 0.; matrices->Q_cM_rem = 0.; /* construct qm2 matrix from qm1 entries */ #ifdef _OPENMP #pragma omp parallel for private(d, k, l, da, db, cnt1, cnt2, cnt3, cnt4) #endif for (k = 1; k < seq_length - TURN - 1; k++) { int k_min_Q_M2, 
k_max_Q_M2, l_min_Q_M2, l_max_Q_M2; int k_min_post_m2, k_max_post_m2, *l_min_post_m2, *l_max_post_m2; int update_m2 = 0; l_min_post_m2 = l_max_post_m2 = NULL; if (!matrices->Q_M2[k]) { update_m2 = 1; k_min_Q_M2 = l_min_Q_M2 = 0; k_max_Q_M2 = mm1[my_iindx[k] - seq_length] + referenceBPs1[my_iindx[k] - seq_length]; l_max_Q_M2 = mm2[my_iindx[k] - seq_length] + referenceBPs2[my_iindx[k] - seq_length]; prepareBoundaries(k_min_Q_M2, k_max_Q_M2, l_min_Q_M2, l_max_Q_M2, bpdist[my_iindx[k] - seq_length], &matrices->k_min_Q_M2[k], &matrices->k_max_Q_M2[k], &matrices->l_min_Q_M2[k], &matrices->l_max_Q_M2[k] ); preparePosteriorBoundaries(matrices->k_max_Q_M2[k] - matrices->k_min_Q_M2[k] + 1, matrices->k_min_Q_M2[k], &k_min_post_m2, &k_max_post_m2, &l_min_post_m2, &l_max_post_m2 ); prepareArray(&matrices->Q_M2[k], matrices->k_min_Q_M2[k], matrices->k_max_Q_M2[k], matrices->l_min_Q_M2[k], matrices->l_max_Q_M2[k] ); } /* construct Q_M2 */ for (l = k + TURN + 1; l < seq_length - TURN - 1; l++) { if (Q_M1_rem[jindx[l] + k]) { if (Q_M1[jindx[seq_length] + l + 1]) { for (cnt1 = k_min_Q_M1[jindx[seq_length] + l + 1]; cnt1 <= k_max_Q_M1[jindx[seq_length] + l + 1]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt1]; cnt2 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt1]; cnt2 += 2) matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] * Q_M1[jindx[seq_length] + l + 1][cnt1][cnt2 / 2]; } if (Q_M1_rem[jindx[seq_length] + l + 1]) matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] * Q_M1_rem[jindx[seq_length] + l + 1]; } if (Q_M1_rem[jindx[seq_length] + l + 1]) { if (Q_M1[jindx[l] + k]) { for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1_rem[jindx[seq_length] + l + 1]; } } if (matrices->Q_M1[jindx[l] + k] && matrices->Q_M1[jindx[seq_length] + l + 1]) { da = referenceBPs1[my_iindx[k] - seq_length] - 
referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - seq_length]; db = referenceBPs2[my_iindx[k] - seq_length] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - seq_length]; for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) { for (cnt3 = k_min_Q_M1[jindx[seq_length] + l + 1]; cnt3 <= k_max_Q_M1[jindx[seq_length] + l + 1]; cnt3++) for (cnt4 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_M2[k][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2]; if (update_m2) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_m2, &k_max_post_m2, &l_min_post_m2, &l_max_post_m2 ); } } else { matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2]; } } } } } if (update_m2) { adjustArrayBoundaries(&matrices->Q_M2[k], &matrices->k_min_Q_M2[k], &matrices->k_max_Q_M2[k], &matrices->l_min_Q_M2[k], &matrices->l_max_Q_M2[k], k_min_post_m2, k_max_post_m2, l_min_post_m2, l_max_post_m2 ); } } base_d1 = referenceBPs1[my_iindx[1] - seq_length]; base_d2 = referenceBPs2[my_iindx[1] - seq_length]; int min_k, max_k, max_l, min_l; int min_k_real, max_k_real, min_k_real_qcH, max_k_real_qcH, min_k_real_qcI, max_k_real_qcI, min_k_real_qcM, max_k_real_qcM; int *min_l_real, *max_l_real, *min_l_real_qcH, *max_l_real_qcH, *min_l_real_qcI, *max_l_real_qcI, *min_l_real_qcM, *max_l_real_qcM; int update_c, update_cH, update_cI, update_cM; max_l_real_qcM = min_l_real_qcM = NULL; max_l_real_qcI = min_l_real_qcI = NULL; max_l_real_qcH = min_l_real_qcH = NULL; max_l_real = min_l_real = NULL; update_c = update_cH = update_cI = update_cM = 0; min_k = min_l = 0; max_k = 
mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length]; max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length]; #ifdef _OPENMP #pragma omp sections { #pragma omp section { #endif if (!matrices->Q_c) { update_c = 1; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Q_c, &matrices->k_max_Q_c, &matrices->l_min_Q_c, &matrices->l_max_Q_c ); prepareArray(&matrices->Q_c, matrices->k_min_Q_c, matrices->k_max_Q_c, matrices->l_min_Q_c, matrices->l_max_Q_c ); preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } #ifdef _OPENMP } #pragma omp section { #endif if (!matrices->Q_cH) { update_cH = 1; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Q_cH, &matrices->k_max_Q_cH, &matrices->l_min_Q_cH, &matrices->l_max_Q_cH ); prepareArray(&matrices->Q_cH, matrices->k_min_Q_cH, matrices->k_max_Q_cH, matrices->l_min_Q_cH, matrices->l_max_Q_cH ); preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_qcH, &max_k_real_qcH, &min_l_real_qcH, &max_l_real_qcH ); } #ifdef _OPENMP } #pragma omp section { #endif if (!matrices->Q_cI) { update_cI = 1; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Q_cI, &matrices->k_max_Q_cI, &matrices->l_min_Q_cI, &matrices->l_max_Q_cI ); prepareArray(&matrices->Q_cI, matrices->k_min_Q_cI, matrices->k_max_Q_cI, matrices->l_min_Q_cI, matrices->l_max_Q_cI ); preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_qcI, &max_k_real_qcI, &min_l_real_qcI, &max_l_real_qcI ); } #ifdef _OPENMP } #pragma omp section { #endif if (!matrices->Q_cM) { update_cM = 1; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Q_cM, &matrices->k_max_Q_cM, &matrices->l_min_Q_cM, &matrices->l_max_Q_cM ); prepareArray(&matrices->Q_cM, matrices->k_min_Q_cM, matrices->k_max_Q_cM, 
matrices->l_min_Q_cM, matrices->l_max_Q_cM ); preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_qcM, &max_k_real_qcM, &min_l_real_qcM, &max_l_real_qcM ); } #ifdef _OPENMP } } #endif for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */ #ifdef _OPENMP #pragma omp parallel for private(p, q, pq, k, l, kl, u, da, db, type, cnt1, cnt2, cnt3, cnt4) #endif for (q = d; q <= seq_length; q++) { FLT_OR_DBL qot; char loopseq[10]; p = q - d + 1; pq = my_iindx[p] - q; /* 1. get exterior hairpin contribution */ u = seq_length - q + p - 1; if (u < TURN) continue; type = ptype[jindx[q] + p]; if (!type) continue; if (((type == 3) || (type == 4)) && no_closingGU) continue; /* cause we want to calc the exterior loops, we need the reversed pair type from now on */ type = rtype[type]; if (u < 7) { strcpy(loopseq, sequence + q - 1); strncat(loopseq, sequence, p); } /* get distance to reference if closing the hairpin * da = dbp(T1_[1,n}, T1_{p,q}) * db = dbp(T2_{1,n}, T2_{p,q}) */ da = base_d1 - referenceBPs1[pq]; db = base_d2 - referenceBPs2[pq]; qot = exp_E_Hairpin(u, type, S1[q + 1], S1[p - 1], loopseq, pf_params) * scale[u]; if (Q_B_rem[pq]) matrices->Q_cH_rem += Q_B_rem[pq] * qot; if (Q_B[pq]) { for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++) for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_cH[cnt1 + da][(cnt2 + db) / 2] += Q_B[pq][cnt1][cnt2 / 2] * qot; if (update_cH) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real_qcH, &max_k_real_qcH, &min_l_real_qcH, &max_l_real_qcH ); } } else { matrices->Q_cH_rem += Q_B[pq][cnt1][cnt2 / 2] * qot; } } } /* 2. 
exterior interior loops, i "define" the (k,l) pair as "outer pair" */ /* so "outer type" is rtype[type[k,l]] and inner type is type[p,q] */ if (Q_B_rem[pq]) { for (k = q + 1; k < seq_length; k++) { unsigned int ln1, lstart, ln_pre; ln1 = k - q - 1; if (ln1 + p - 1 > MAXLOOP) break; lstart = k + TURN + 1; ln_pre = ln1 + p + seq_length; if (ln_pre > lstart + MAXLOOP) lstart = ln_pre - MAXLOOP - 1; for (l = lstart; l <= seq_length; l++) { unsigned int ln2; int type2; kl = my_iindx[k] - l; ln2 = (p - 1) + (seq_length - l); if ((ln1 + ln2) > MAXLOOP) continue; type2 = ptype[jindx[l] + k]; if (!type2) continue; qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], pf_params) * scale[ln1 + ln2]; if (Q_B_rem[kl]) matrices->Q_cI_rem += Q_B_rem[pq] * Q_B_rem[kl] * qot; if (Q_B[kl]) { for (cnt1 = k_min_Q_B[kl]; cnt1 <= k_max_Q_B[kl]; cnt1++) for (cnt2 = l_min_Q_B[kl][cnt1]; cnt2 <= l_max_Q_B[kl][cnt1]; cnt2 += 2) matrices->Q_cI_rem += Q_B_rem[pq] * Q_B[kl][cnt1][cnt2 / 2] * qot; } } } } if (Q_B[pq]) { for (k = q + 1; k < seq_length; k++) { unsigned int ln1, lstart, ln_pre; ln1 = k - q - 1; if (ln1 + p - 1 > MAXLOOP) break; lstart = k + TURN + 1; ln_pre = ln1 + p + seq_length; if (ln_pre > lstart + MAXLOOP) lstart = ln_pre - MAXLOOP - 1; for (l = lstart; l <= seq_length; l++) { unsigned int ln2; int type2; kl = my_iindx[k] - l; ln2 = (p - 1) + (seq_length - l); if ((ln1 + ln2) > MAXLOOP) continue; type2 = ptype[jindx[l] + k]; if (!type2) continue; qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], pf_params) * scale[ln1 + ln2]; if (Q_B_rem[kl]) { for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++) for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] * Q_B_rem[kl] * qot; } if (!Q_B[kl]) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{k,l}) * d2b = dbp(T2_[1,n}, T2_{p,q} 
+ T2_{k,l}) */ da = base_d1 - referenceBPs1[pq] - referenceBPs1[kl]; db = base_d2 - referenceBPs2[pq] - referenceBPs2[kl]; for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++) for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_B[kl]; cnt3 <= k_max_Q_B[kl]; cnt3++) for (cnt4 = l_min_Q_B[kl][cnt3]; cnt4 <= l_max_Q_B[kl][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_cI[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_B[pq][cnt1][cnt2 / 2] * Q_B[kl][cnt3][cnt4 / 2] * qot; if (update_cI) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &min_k_real_qcI, &max_k_real_qcI, &min_l_real_qcI, &max_l_real_qcI ); } } else { matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] * Q_B[kl][cnt3][cnt4 / 2] * qot; } } } } } } if (update_cH) { adjustArrayBoundaries(&matrices->Q_cH, &matrices->k_min_Q_cH, &matrices->k_max_Q_cH, &matrices->l_min_Q_cH, &matrices->l_max_Q_cH, min_k_real_qcH, max_k_real_qcH, min_l_real_qcH, max_l_real_qcH ); } if (update_cI) { adjustArrayBoundaries(&matrices->Q_cI, &matrices->k_min_Q_cI, &matrices->k_max_Q_cI, &matrices->l_min_Q_cI, &matrices->l_max_Q_cI, min_k_real_qcI, max_k_real_qcI, min_l_real_qcI, max_l_real_qcI ); } /* 3. 
Multiloops */ if (seq_length > 2 * TURN - 3) { #ifdef _OPENMP #pragma omp parallel for private(k, da, db, cnt1, cnt2, cnt3, cnt4) #endif for (k = TURN + 2; k < seq_length - 2 * TURN - 3; k++) { if (Q_M_rem[my_iindx[1] - k]) { if (matrices->Q_M2[k + 1]) { for (cnt1 = matrices->k_min_Q_M2[k + 1]; cnt1 <= matrices->k_max_Q_M2[k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M2[k + 1][cnt1]; cnt2 <= matrices->l_max_Q_M2[k + 1][cnt1]; cnt2 += 2) matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] * matrices->Q_M2[k + 1][cnt1][cnt2 / 2] * pf_params->expMLclosing; } if (matrices->Q_M2_rem[k + 1]) matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] * matrices->Q_M2_rem[k + 1] * pf_params->expMLclosing; } if (matrices->Q_M2_rem[k + 1]) { if (Q_M[my_iindx[1] - k]) { for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2_rem[k + 1] * pf_params->expMLclosing; } } /* get distancies to references * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n}) * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n}) */ da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - seq_length]; db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - seq_length]; if (Q_M[my_iindx[1] - k] && matrices->Q_M2[k + 1]) { for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_Q_M2[k + 1]; cnt3 <= matrices->k_max_Q_M2[k + 1]; cnt3++) for (cnt4 = matrices->l_min_Q_M2[k + 1][cnt3]; cnt4 <= matrices->l_max_Q_M2[k + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_cM[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2[k + 1][cnt3][cnt4 / 2] 
* pf_params->expMLclosing; if (update_cM) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &min_k_real_qcM, &max_k_real_qcM, &min_l_real_qcM, &max_l_real_qcM ); } } else { matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing; } } } } } if (update_cM) { adjustArrayBoundaries(&matrices->Q_cM, &matrices->k_min_Q_cM, &matrices->k_max_Q_cM, &matrices->l_min_Q_cM, &matrices->l_max_Q_cM, min_k_real_qcM, max_k_real_qcM, min_l_real_qcM, max_l_real_qcM ); } for (cnt1 = matrices->k_min_Q_cH; cnt1 <= matrices->k_max_Q_cH; cnt1++) for (cnt2 = matrices->l_min_Q_cH[cnt1]; cnt2 <= matrices->l_max_Q_cH[cnt1]; cnt2 += 2) { matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cH[cnt1][cnt2 / 2]; if (update_c) { updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } for (cnt1 = matrices->k_min_Q_cI; cnt1 <= matrices->k_max_Q_cI; cnt1++) for (cnt2 = matrices->l_min_Q_cI[cnt1]; cnt2 <= matrices->l_max_Q_cI[cnt1]; cnt2 += 2) { matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cI[cnt1][cnt2 / 2]; if (update_c) { updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } for (cnt1 = matrices->k_min_Q_cM; cnt1 <= matrices->k_max_Q_cM; cnt1++) for (cnt2 = matrices->l_min_Q_cM[cnt1]; cnt2 <= matrices->l_max_Q_cM[cnt1]; cnt2 += 2) { matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cM[cnt1][cnt2 / 2]; if (update_c) { updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } matrices->Q_c_rem = matrices->Q_cH_rem + matrices->Q_cI_rem + matrices->Q_cM_rem; /* add the case were structure is unfolded chain */ if ((referenceBPs1[my_iindx[1] - seq_length] <= maxD1) && (referenceBPs2[my_iindx[1] - seq_length] <= maxD2)) { matrices->Q_c[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] += 1.0 * scale[seq_length]; if (update_c) { updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - 
seq_length], referenceBPs2[my_iindx[1] - seq_length], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } else { matrices->Q_c_rem += 1.0 * scale[seq_length]; } adjustArrayBoundaries(&matrices->Q_c, &matrices->k_min_Q_c, &matrices->k_max_Q_c, &matrices->l_min_Q_c, &matrices->l_max_Q_c, min_k_real, max_k_real, min_l_real, max_l_real ); } /* * ################################################### * stochastic backtracking * ################################################### */ PUBLIC char * vrna_pbacktrack_TwoD(vrna_fold_compound_t *vc, int d1, int d2) { return vrna_pbacktrack5_TwoD(vc, d1, d2, vc->length); } PUBLIC char * vrna_pbacktrack5_TwoD(vrna_fold_compound_t *vc, int d1, int d2, unsigned int length) { char *pstruc, *ptype; short *S1; unsigned int i, j, n, start, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, type, **l_min_Q, **l_max_Q, **l_min_Q_B, **l_max_Q_B, *k_min_Q, *k_max_Q, *k_min_Q_B, *k_max_Q_B; FLT_OR_DBL r, qt, *scale, ***Q, ***Q_B, *Q_rem, *Q_B_rem; vrna_exp_param_t *pf_params; vrna_md_t *md; vrna_mx_pf_t *matrices; n = vc->length; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; S1 = vc->sequence_encoding; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; Q = matrices->Q; l_min_Q = matrices->l_min_Q; l_max_Q = matrices->l_max_Q; k_min_Q = matrices->k_min_Q; k_max_Q = matrices->k_max_Q; Q_B = matrices->Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; Q_rem = matrices->Q_rem; Q_B_rem = matrices->Q_B_rem; cnt1 = cnt2 = cnt3 = cnt4 = -1; if (md->circ) { if (n != length) vrna_message_error("vrna_pbacktrack_TwoD@2Dfold.c: cotranscriptional backtracking for circular RNAs not supported!"); return pbacktrack_circ(vc, d1, d2); 
} if (length > n) vrna_message_error("vrna_pbacktrack_TwoD@2Dpfold.c: requested transcript length exceeds sequence length!"); #if 0 if (d1 > maxD1) vrna_message_error("pbacktrack@2Dpfold.c: distance to 1st reference structure to high!"); if (d2 > maxD2) vrna_message_error("pbacktrack@2Dpfold.c: distance to 2nd reference structure to high!"); #endif /* check whether the chosen neighborhood exists at all */ int dumb = 1; ij = my_iindx[1] - length; if ((d1 == -1) && (Q_rem[ij] != 0.)) { dumb = 0; } else { if ((k_min_Q[ij] <= d1) && (k_max_Q[ij] >= d1)) { int l_min = l_min_Q[ij][d1]; if ((d2 % 2) == (l_min % 2)) if ((l_min <= d2) && (l_max_Q[ij][d1] >= d2)) dumb = 0; } } if (dumb) { vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n" "pbacktrack@2Dpfold.c: exiting...", d1, d2); } pstruc = vrna_alloc((length + 1) * sizeof(char)); for (i = 0; i < length; i++) pstruc[i] = '.'; pstruc[i] = '\0'; start = 1; while (start < length) { int sn = my_iindx[start] - length; /* find i position of first pair */ FLT_OR_DBL qln_i = 0, qln_i1 = 0; if (d1 == -1) { qln_i = Q_rem[sn]; /* open chain ? 
*/ if ((maxD1 > referenceBPs1[sn]) && (maxD2 > referenceBPs2[sn])) { r = vrna_urn() * qln_i; if (scale[length - start + 1] > r) return pstruc; } /* lets see if we find a base pair with i involved */ for (i = start; i < length; i++) { r = vrna_urn() * qln_i; qln_i1 = Q_rem[my_iindx[i + 1] - length]; da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length]; for (cnt1 = k_min_Q[my_iindx[i + 1] - length]; cnt1 <= k_max_Q[my_iindx[i + 1] - length]; cnt1++) for (cnt2 = l_min_Q[my_iindx[i + 1] - length][cnt1]; cnt2 <= l_max_Q[my_iindx[i + 1] - length][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) qln_i1 += Q[my_iindx[i + 1] - length][cnt1][cnt2 / 2]; if (r > qln_i1 * scale[1]) break; qln_i = qln_i1; } if (i >= length) break; /* no more pairs */ /* i is paired, find pairing partner j */ r = vrna_urn() * (qln_i - qln_i1 * scale[1]); for (qt = 0, j = i + TURN + 1; j < length; j++) { ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { cnt1 = cnt2 = cnt3 = cnt4 = -1; double qkl = exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, S1[j + 1], pf_params); if (Q_B_rem[ij] != 0.) { if (Q_rem[my_iindx[j + 1] - length] != 0.) { qt += qkl * Q_B_rem[ij] * Q_rem[my_iindx[j + 1] - length]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } if (Q[my_iindx[j + 1] - length]) { for (cnt3 = k_min_Q[my_iindx[j + 1] - length]; cnt3 <= k_max_Q[my_iindx[j + 1] - length]; cnt3++) for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3]; cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3]; cnt4 += 2) { qt += qkl * Q_B_rem[ij] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } if (Q_rem[my_iindx[j + 1] - length] != 0.) 
{ cnt3 = cnt4 = -1; if (Q_B[ij]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q_rem[my_iindx[j + 1] - length]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* if we still search for pairing partner j, we go on here... */ if (Q_B[ij] && Q[my_iindx[j + 1] - length]) { da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q[my_iindx[j + 1] - length]; cnt3 <= k_max_Q[my_iindx[j + 1] - length]; cnt3++) for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3]; cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ } /* end for(j) */ cnt1 = cnt2 = cnt3 = cnt4 = -1; /* dont forget the case where i pairs with n */ j = length; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { double qkl = exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? S1[j + 1] : -1, pf_params); if (Q_B_rem[ij] != 0.) { qt += qkl * Q_B_rem[ij]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } /* if we still search for pairing partner j, we go on here... 
*/ if (Q_B[ij]) { da = referenceBPs1[sn] - referenceBPs1[ij]; db = referenceBPs2[sn] - referenceBPs2[ij]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ j++; pbacktrack_ext_loop_early_escape_rem: if (j == length + 1) vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop (rem)"); /* finally start backtracking the first exterior stem */ backtrack(vc, pstruc, cnt1, cnt2, i, j); if (j == length) break; start = j + 1; d1 = cnt3; d2 = cnt4; } /* end if d1 ==-1 */ else { qln_i = Q[sn][d1][d2 / 2]; /* open chain ? */ if ((d1 == referenceBPs1[sn]) && (d2 == referenceBPs2[sn])) { r = vrna_urn() * qln_i; if (scale[length - start + 1] > r) return pstruc; } for (i = start; i < length; i++) { r = vrna_urn() * qln_i; da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length]; qln_i1 = 0; if (d1 >= da && d2 >= db) { if ( (d1 - da >= k_min_Q[my_iindx[i + 1] - length]) && (d1 - da <= k_max_Q[my_iindx[i + 1] - length])) { if ( (d2 - db >= l_min_Q[my_iindx[i + 1] - length][d1 - da]) && (d2 - db <= l_max_Q[my_iindx[i + 1] - length][d1 - da])) qln_i1 += Q[my_iindx[i + 1] - length][d1 - da][(d2 - db) / 2]; } } if (r > qln_i1 * scale[1]) break; /* i is paired */ qln_i = qln_i1; } if (i >= length) break; /* no more pairs */ /* now find the pairing partner j */ r = vrna_urn() * (qln_i - qln_i1 * scale[1]); for (qt = 0, j = i + 1; j < length; j++) { int type; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i > 1) ? 
S1[i - 1] : -1, S1[j + 1], pf_params); da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length]; if ((d1 >= da) && (d2 >= db) && Q_B[ij] && Q[my_iindx[j + 1] - length]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= MIN2(k_max_Q_B[ij], d1 - da); cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= MIN2(l_max_Q_B[ij][cnt1], d2 - db); cnt2 += 2) if ((d1 - da - cnt1 >= k_min_Q[my_iindx[j + 1] - length]) && (d1 - da - cnt1 <= k_max_Q[my_iindx[j + 1] - length])) { if ((d2 - db - cnt2 >= l_min_Q[my_iindx[j + 1] - length][d1 - da - cnt1]) && (d2 - db - cnt2 <= l_max_Q[my_iindx[j + 1] - length][d1 - da - cnt1])) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][d1 - da - cnt1][(d2 - db - cnt2) / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape; } } } } } /* now dont forget the case j==n */ j = length; ij = my_iindx[i] - j; int type = ptype[jindx[j] + i]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? 
S1[j + 1] : -1, pf_params); da = referenceBPs1[sn] - referenceBPs1[ij]; db = referenceBPs2[sn] - referenceBPs2[ij]; if (d1 >= da && d2 >= db) { cnt1 = d1 - da; cnt2 = d2 - db; if ((cnt1 >= k_min_Q_B[ij]) && (cnt1 <= k_max_Q_B[ij])) { if ((cnt2 >= l_min_Q_B[ij][cnt1]) && (cnt2 <= l_max_Q_B[ij][cnt1])) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape; /* j is paired */ } } } } j++; pbacktrack_ext_loop_early_escape: if (j == length + 1) vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop"); backtrack(vc, pstruc, cnt1, cnt2, i, j); if (j == length) break; start = j + 1; d1 -= cnt1 + da; d2 -= cnt2 + db; } /* end if d1!=-1 */ } return pstruc; } PRIVATE char * pbacktrack_circ(vrna_fold_compound_t *vc, int d1, int d2) { char *pstruc; unsigned int i, n, maxD1, maxD2, *referenceBPs1, *referenceBPs2; int *my_iindx, k_min_Q_c, k_max_Q_c, k_min_Q_cH, k_max_Q_cH, k_min_Q_cI, k_max_Q_cI, k_min_Q_cM, k_max_Q_cM, *l_min_Q_c, *l_max_Q_c, *l_min_Q_cH, *l_max_Q_cH, *l_min_Q_cI, *l_max_Q_cI, *l_min_Q_cM, *l_max_Q_cM; FLT_OR_DBL r, *scale, qot, **Q_c, **Q_cH, **Q_cI, **Q_cM, Q_c_rem, Q_cH_rem, Q_cI_rem, Q_cM_rem; vrna_mx_pf_t *matrices; matrices = vc->exp_matrices; n = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; scale = matrices->scale; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; Q_c = matrices->Q_c; l_min_Q_c = matrices->l_min_Q_c; l_max_Q_c = matrices->l_max_Q_c; k_min_Q_c = matrices->k_min_Q_c; k_max_Q_c = matrices->k_max_Q_c; Q_cH = matrices->Q_cH; l_min_Q_cH = matrices->l_min_Q_cH; l_max_Q_cH = matrices->l_max_Q_cH; k_min_Q_cH = matrices->k_min_Q_cH; k_max_Q_cH = matrices->k_max_Q_cH; Q_cI = matrices->Q_cI; l_min_Q_cI = matrices->l_min_Q_cI; l_max_Q_cI = matrices->l_max_Q_cI; k_min_Q_cI = matrices->k_min_Q_cI; k_max_Q_cI = matrices->k_max_Q_cI; Q_cM = matrices->Q_cM; l_min_Q_cM = matrices->l_min_Q_cM; l_max_Q_cM = matrices->l_max_Q_cM; k_min_Q_cM = 
matrices->k_min_Q_cM; k_max_Q_cM = matrices->k_max_Q_cM; Q_c_rem = matrices->Q_c_rem; Q_cH_rem = matrices->Q_cH_rem; Q_cI_rem = matrices->Q_cI_rem; Q_cM_rem = matrices->Q_cM_rem; /* check whether the chosen neighborhood exists at all */ int dumb = 1; if ((d1 == -1) && (Q_c_rem != 0.)) { dumb = 0; } else { if ((k_min_Q_c <= d1) && (k_max_Q_c >= d1)) { int l_min = l_min_Q_c[d1]; if ((d2 % 2) == (l_min % 2)) if ((l_min <= d2) && (l_max_Q_c[d1] >= d2)) dumb = 0; } } if (dumb) { vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n" "pbacktrack_circ@2Dpfold.c: exiting cheerless...", d1, d2); } pstruc = vrna_alloc((n + 1) * sizeof(char)); for (i = 0; i < n; i++) pstruc[i] = '.'; pstruc[i] = '\0'; /* now we come to the actual backtracking process */ qot = 0.; /* backtrack in rest-partition */ if (d1 == -1) { r = vrna_urn() * Q_c_rem; /* open chain ? */ if ((referenceBPs1[my_iindx[1] - n] > maxD1) || (referenceBPs2[my_iindx[1] - n] > maxD2)) { qot = 1.0 * scale[n]; if (qot >= r) goto pbacktrack_circ_escape; } qot += Q_cH_rem; if (qot >= r) { backtrack_qcH(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } qot += Q_cI_rem; if (qot >= r) { backtrack_qcI(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } qot += Q_cM_rem; if (qot >= r) { backtrack_qcM(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } vrna_message_error("pbacktrack_circ@2Dpfold.c: backtracking failed in exterior loop! Exiting cheerless..."); } /* normal backtracking */ else { r = vrna_urn() * Q_c[d1][d2 / 2]; /* open chain ? */ if ((referenceBPs1[my_iindx[1] - n] == d1) && (referenceBPs2[my_iindx[1] - n] == d2)) { qot += 1.0 * scale[n]; if (qot >= r) goto pbacktrack_circ_escape; } /* exterior hairpin loop ? 
*/ if ((k_min_Q_cH <= d1) && (k_max_Q_cH >= d1)) { int l_min = l_min_Q_cH[d1]; if ((d2 % 2) == (l_min % 2)) { if ((l_min <= d2) && (l_max_Q_cH[d1] >= d2)) { qot += Q_cH[d1][d2 / 2]; if (qot >= r) { backtrack_qcH(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } } /* exterior interior loop ? */ if ((k_min_Q_cI <= d1) && (k_max_Q_cI >= d1)) { int l_min = l_min_Q_cI[d1]; if ((d2 % 2) == (l_min % 2)) { if ((l_min <= d2) && (l_max_Q_cI[d1] >= d2)) { qot += Q_cI[d1][d2 / 2]; if (qot >= r) { backtrack_qcI(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } } /* exterior multibranch loop ? */ if ((k_min_Q_cM <= d1) && (k_max_Q_cM >= d1)) { int l_min = l_min_Q_cM[d1]; if ((d2 % 2) == (l_min % 2)) { if ((l_min <= d2) && (l_max_Q_cM[d1] >= d2)) { qot += Q_cM[d1][d2 / 2]; if (qot >= r) { backtrack_qcM(vc, pstruc, d1, d2); goto pbacktrack_circ_escape; } } } } } pbacktrack_circ_escape: return pstruc; } PRIVATE void backtrack_qcH(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2) { char *ptype, *sequence; short *S1; unsigned int i, j, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int u, *my_iindx, *jindx, ij, cnt1, cnt2, type, **l_min_Q_B, **l_max_Q_B, *k_min_Q_B, *k_max_Q_B, *rtype; FLT_OR_DBL r, qt, *scale, qot, ***Q_B, **Q_cH, *Q_B_rem, Q_cH_rem; vrna_exp_param_t *pf_params; vrna_md_t *md; vrna_mx_pf_t *matrices; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; n = vc->length; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; rtype = &(md->rtype[0]); S1 = vc->sequence_encoding; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; maxD1 = vc->maxD1; maxD2 = vc->maxD2; Q_B_rem = matrices->Q_B_rem; Q_B = matrices->Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; Q_cH_rem = matrices->Q_cH_rem; Q_cH = matrices->Q_cH; qot = qt = 0.; 
base_d1 = referenceBPs1[my_iindx[1] - n]; base_d2 = referenceBPs2[my_iindx[1] - n]; if (d1 == -1) { r = vrna_urn() * Q_cH_rem; for (i = 1; i < n; i++) for (j = i + TURN + 1; j <= n; j++) { char loopseq[10]; ij = my_iindx[i] - j; u = n - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; if (!type) continue; if (((type == 3) || (type == 4)) && no_closingGU) continue; type = rtype[type]; if (u < 7) { strcpy(loopseq, sequence + j - 1); strncat(loopseq, sequence, i); } qt = exp_E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, pf_params) * scale[u]; if (Q_B_rem[ij]) { qot += Q_B_rem[ij] * qt; if (qot >= r) { backtrack(vc, pstruc, d1, d2, i, j); return; } } da = base_d1 - referenceBPs1[ij]; db = base_d2 - referenceBPs2[ij]; if (Q_B[ij]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) { if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qot += Q_B[ij][cnt1][cnt2 / 2] * qt; if (qot >= r) { backtrack(vc, pstruc, cnt1, cnt2, i, j); return; } } } } } } else { r = vrna_urn() * Q_cH[d1][d2 / 2]; for (i = 1; i < n; i++) for (j = i + TURN + 1; j <= n; j++) { char loopseq[10]; ij = my_iindx[i] - j; if (!Q_B[ij]) continue; u = n - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; if (!type) continue; if (((type == 3) || (type == 4)) && no_closingGU) continue; type = rtype[type]; if (u < 7) { strcpy(loopseq, sequence + j - 1); strncat(loopseq, sequence, i); } qt = exp_E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, pf_params) * scale[u]; da = base_d1 - referenceBPs1[ij]; db = base_d2 - referenceBPs2[ij]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) { if (((cnt1 + da) == d1) && ((cnt2 + db) == d2)) { qot += Q_B[ij][cnt1][cnt2 / 2] * qt; if (qot >= r) { backtrack(vc, pstruc, cnt1, cnt2, i, j); return; } } } } } vrna_message_error("backtrack_qcH@2Dpfold.c: failed to find closing pair!"); 
} PRIVATE void backtrack_qcI(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2) { char *ptype; short *S1; unsigned int i, j, ij, p, q, pq, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, type, **l_min_Q_B, **l_max_Q_B, *k_min_Q_B, *k_max_Q_B, *rtype; FLT_OR_DBL r, qt, *scale, qot, ***Q_B, *Q_B_rem, **Q_cI, Q_cI_rem; vrna_exp_param_t *pf_params; vrna_md_t *md; vrna_mx_pf_t *matrices; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; n = vc->length; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; rtype = &(md->rtype[0]); S1 = vc->sequence_encoding; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; maxD1 = vc->maxD1; maxD2 = vc->maxD2; Q_B = matrices->Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; Q_cI = matrices->Q_cI; Q_B_rem = matrices->Q_B_rem; Q_cI_rem = matrices->Q_cI_rem; qot = qt = 0.; base_d1 = referenceBPs1[my_iindx[1] - n]; base_d2 = referenceBPs2[my_iindx[1] - n]; if (d1 == -1) { r = vrna_urn() * Q_cI_rem; for (i = 1; i < n; i++) for (j = i + TURN + 1; j <= n; j++) { ij = my_iindx[i] - j; type = rtype[(unsigned int)ptype[jindx[j] + i]]; if (!type) continue; if (Q_B_rem[ij]) { for (p = j + 1; p < n; p++) { unsigned int ln1, qstart, ln_pre; ln1 = p - j - 1; if (ln1 + i - 1 > MAXLOOP) break; qstart = p + TURN + 1; ln_pre = ln1 + i + n; if (ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1; for (q = qstart; q <= n; q++) { unsigned int ln2; int type2; pq = my_iindx[p] - q; ln2 = (i - 1) + (n - q); if ((ln1 + ln2) > MAXLOOP) continue; type2 = ptype[jindx[q] + p]; if (!type2) continue; qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1], pf_params) * scale[ln1 + ln2]; if (Q_B_rem[pq]) { qot += Q_B_rem[ij] * Q_B_rem[pq] * qt; if (qot > r) { backtrack(vc, 
pstruc, d1, d2, i, j); backtrack(vc, pstruc, d1, d2, p, q); return; } } if (Q_B[pq]) { for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++) for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) { qot += Q_B_rem[ij] * Q_B[pq][cnt1][cnt2 / 2] * qt; if (qot > r) { backtrack(vc, pstruc, d1, d2, i, j); backtrack(vc, pstruc, cnt1, cnt2, p, q); return; } } } } } } if (Q_B[ij]) { for (p = j + 1; p < n; p++) { unsigned int ln1, qstart, ln_pre; ln1 = p - j - 1; if (ln1 + i - 1 > MAXLOOP) break; qstart = p + TURN + 1; ln_pre = ln1 + i + n; if (ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1; for (q = qstart; q <= n; q++) { unsigned int ln2; int type2; pq = my_iindx[p] - q; ln2 = (i - 1) + (n - q); if ((ln1 + ln2) > MAXLOOP) continue; type2 = ptype[jindx[q] + p]; if (!type2) continue; qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1], pf_params) * scale[ln1 + ln2]; if (Q_B_rem[pq]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) { qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B_rem[pq] * qt; if (qot > r) { backtrack(vc, pstruc, cnt1, cnt2, i, j); backtrack(vc, pstruc, d1, d2, p, q); return; } } } if (Q_B[pq]) { da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_B[pq]; cnt3 <= k_max_Q_B[pq]; cnt3++) for (cnt4 = l_min_Q_B[pq][cnt3]; cnt4 <= l_max_Q_B[pq][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B[pq][cnt3][cnt4 / 2] * qt; if (qot > r) { backtrack(vc, pstruc, cnt1, cnt2, i, j); backtrack(vc, pstruc, cnt3, cnt4, p, q); return; } } } } } } } } } else { r = vrna_urn() * Q_cI[d1][d2 / 2]; for (i = 1; i < n; i++) for (j = i + TURN + 1; j <= n; j++) { ij 
= my_iindx[i] - j; type = rtype[(unsigned int)ptype[jindx[j] + i]]; if (!type) continue; if (!Q_B[ij]) continue; for (p = j + 1; p < n; p++) { unsigned int ln1, qstart, ln_pre; ln1 = p - j - 1; if (ln1 + i - 1 > MAXLOOP) break; qstart = p + TURN + 1; ln_pre = ln1 + i + n; if (ln_pre > qstart + MAXLOOP) qstart = ln_pre - MAXLOOP - 1; for (q = qstart; q <= n; q++) { unsigned int ln2; int type2; pq = my_iindx[p] - q; if (!Q_B[pq]) continue; ln2 = (i - 1) + (n - q); if ((ln1 + ln2) > MAXLOOP) continue; type2 = ptype[jindx[q] + p]; if (!type2) continue; qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1], pf_params) * scale[ln1 + ln2]; da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_B[pq]; cnt3 <= k_max_Q_B[pq]; cnt3++) for (cnt4 = l_min_Q_B[pq][cnt3]; cnt4 <= l_max_Q_B[pq][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) { qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B[pq][cnt3][cnt4 / 2] * qt; if (qot > r) { backtrack(vc, pstruc, cnt1, cnt2, i, j); backtrack(vc, pstruc, cnt3, cnt4, p, q); return; } } } } } } } } PRIVATE void backtrack_qcM(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2) { unsigned int k, n, maxD1, maxD2, base_d1, base_d2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, cnt1, cnt2, cnt3, cnt4, **l_min_Q_M, **l_max_Q_M, **l_min_Q_M2, **l_max_Q_M2, *k_min_Q_M, *k_max_Q_M, *k_min_Q_M2, *k_max_Q_M2; FLT_OR_DBL r, qt, qot, ***Q_M, ***Q_M2, **Q_cM, *Q_M_rem, *Q_M2_rem, Q_cM_rem; vrna_exp_param_t *pf_params; vrna_mx_pf_t *matrices; pf_params = vc->exp_params; matrices = vc->exp_matrices; n = vc->length; my_iindx = vc->iindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; maxD1 = vc->maxD1; maxD2 = vc->maxD2; Q_cM = matrices->Q_cM; Q_M = matrices->Q_M; 
l_min_Q_M = matrices->l_min_Q_M; l_max_Q_M = matrices->l_max_Q_M; k_min_Q_M = matrices->k_min_Q_M; k_max_Q_M = matrices->k_max_Q_M; Q_M2 = matrices->Q_M2; l_min_Q_M2 = matrices->l_min_Q_M2; l_max_Q_M2 = matrices->l_max_Q_M2; k_min_Q_M2 = matrices->k_min_Q_M2; k_max_Q_M2 = matrices->k_max_Q_M2; Q_cM_rem = matrices->Q_cM_rem; Q_M_rem = matrices->Q_M_rem; Q_M2_rem = matrices->Q_M2_rem; base_d1 = referenceBPs1[my_iindx[1] - n]; base_d2 = referenceBPs2[my_iindx[1] - n]; qot = qt = 0.; if (d1 == -1) { r = vrna_urn() * Q_cM_rem; for (k = TURN + 2; k < n - 2 * TURN - 3; k++) { if (Q_M_rem[my_iindx[1] - k]) { if (Q_M2[k + 1]) { for (cnt1 = k_min_Q_M2[k + 1]; cnt1 <= k_max_Q_M2[k + 1]; cnt1++) for (cnt2 = l_min_Q_M2[k + 1][cnt1]; cnt2 <= l_max_Q_M2[k + 1][cnt1]; cnt2 += 2) { qot += Q_M_rem[my_iindx[1] - k] * Q_M2[k + 1][cnt1][cnt2 / 2] * pf_params->expMLclosing; if (qot > r) { backtrack_qm(vc, pstruc, d1, d2, 1, k); backtrack_qm2(vc, pstruc, cnt1, cnt2, k + 1); return; } } } if (Q_M2_rem[k + 1]) { qot += Q_M_rem[my_iindx[1] - k] * Q_M2_rem[k + 1] * pf_params->expMLclosing; if (qot > r) { backtrack_qm(vc, pstruc, d1, d2, 1, k); backtrack_qm2(vc, pstruc, d1, d2, k + 1); return; } } } if (Q_M2_rem[k + 1]) { if (Q_M[my_iindx[1] - k]) { for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) { qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2_rem[k + 1] * pf_params->expMLclosing; if (qot > r) { backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vc, pstruc, d1, d2, k + 1); return; } } } } da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - n]; db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - n]; if (Q_M[my_iindx[1] - k] && Q_M2[k + 1]) { for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= 
l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M2[k + 1]; cnt3 <= k_max_Q_M2[k + 1]; cnt3++) for (cnt4 = l_min_Q_M2[k + 1][cnt3]; cnt4 <= l_max_Q_M2[k + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing; if (qot > r) { backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1); return; } } } } } } else { r = vrna_urn() * Q_cM[d1][d2 / 2]; for (k = TURN + 2; k < n - 2 * TURN - 3; k++) { da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - n]; db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - n]; if (Q_M[my_iindx[1] - k] && Q_M2[k + 1]) { for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M2[k + 1]; cnt3 <= k_max_Q_M2[k + 1]; cnt3++) for (cnt4 = l_min_Q_M2[k + 1][cnt3]; cnt4 <= l_max_Q_M2[k + 1][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) { qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing; if (qot > r) { backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k); backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1); return; } } } } } vrna_message_error("backtrack_qcM@2Dpfold.c: backtracking failed"); } PRIVATE void backtrack_qm2(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int k) { unsigned int l, n, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2; int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, *k_min_Q_M1, *k_max_Q_M1, **l_min_Q_M1, **l_max_Q_M1; FLT_OR_DBL r, qt, qot, ***Q_M2, ***Q_M1, *Q_M2_rem, *Q_M1_rem; vrna_mx_pf_t *matrices; matrices = vc->exp_matrices; n = vc->length; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; 
maxD1 = vc->maxD1; maxD2 = vc->maxD2; Q_M1_rem = matrices->Q_M1_rem; Q_M1 = matrices->Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; Q_M2_rem = matrices->Q_M2_rem; Q_M2 = matrices->Q_M2; qot = qt = 0.; if (d1 == -1) { r = vrna_urn() * Q_M2_rem[k]; for (l = k + TURN + 1; l < n - TURN - 1; l++) { if (Q_M1_rem[jindx[l] + k]) { if (Q_M1[jindx[n] + l + 1]) { for (cnt1 = k_min_Q_M1[jindx[n] + l + 1]; cnt1 <= k_max_Q_M1[jindx[n] + l + 1]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[n] + l + 1][cnt1]; cnt2 <= l_max_Q_M1[jindx[n] + l + 1][cnt1]; cnt2 += 2) { qot += Q_M1_rem[jindx[l] + k] * Q_M1[jindx[n] + l + 1][cnt1][cnt2 / 2]; if (qot > r) { backtrack_qm1(vc, pstruc, d1, d2, k, l); backtrack_qm1(vc, pstruc, cnt1, cnt2, l + 1, n); return; } } } if (Q_M1_rem[jindx[n] + l + 1]) { qot += Q_M1_rem[jindx[l] + k] * Q_M1_rem[jindx[n] + l + 1]; if (qot > r) { backtrack_qm1(vc, pstruc, d1, d2, k, l); backtrack_qm1(vc, pstruc, d1, d2, l + 1, n); return; } } } if (Q_M1_rem[jindx[n] + l + 1]) { if (Q_M1[jindx[l] + k]) { for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) { qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1_rem[jindx[n] + l + 1]; if (qot > r) { backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vc, pstruc, d1, d2, l + 1, n); return; } } } } if (!Q_M1[jindx[l] + k]) continue; if (!Q_M1[jindx[n] + l + 1]) continue; da = referenceBPs1[my_iindx[k] - n] - referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - n]; db = referenceBPs2[my_iindx[k] - n] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - n]; for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) { for (cnt3 = k_min_Q_M1[jindx[n] + l + 1]; cnt3 <= 
k_max_Q_M1[jindx[n] + l + 1]; cnt3++) for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2]; if (qot > r) { backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n); return; } } } } } } else { r = vrna_urn() * Q_M2[k][d1][d2 / 2]; for (l = k + TURN + 1; l < n - TURN - 1; l++) { if (!Q_M1[jindx[l] + k]) continue; if (!Q_M1[jindx[n] + l + 1]) continue; da = referenceBPs1[my_iindx[k] - n] - referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - n]; db = referenceBPs2[my_iindx[k] - n] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - n]; for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) { for (cnt3 = k_min_Q_M1[jindx[n] + l + 1]; cnt3 <= k_max_Q_M1[jindx[n] + l + 1]; cnt3++) for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) { qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2]; if (qot > r) { backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l); backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n); return; } } } } } } vrna_message_error("backtrack_qm2@2Dpfold.c: backtracking failed"); } PRIVATE void backtrack(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j) { FLT_OR_DBL *scale; unsigned int maxD1, maxD2, base_d1, base_d2, da, db; unsigned int *referenceBPs1, *referenceBPs2; char *ptype, *sequence; short *S1, *reference_pt1, *reference_pt2; int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, *rtype; vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */ vrna_md_t *md; vrna_mx_pf_t *matrices; pf_params = 
vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; rtype = &(md->rtype[0]); S1 = vc->sequence_encoding; reference_pt1 = vc->reference_pt1; reference_pt2 = vc->reference_pt2; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1, *Q_B_rem, *Q_M_rem, *Q_M1_rem; int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B; int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B; Q_B = matrices->Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; Q_M = matrices->Q_M; k_min_Q_M = matrices->k_min_Q_M; k_max_Q_M = matrices->k_max_Q_M; l_min_Q_M = matrices->l_min_Q_M; l_max_Q_M = matrices->l_max_Q_M; Q_M1 = matrices->Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; Q_B_rem = matrices->Q_B_rem; Q_M_rem = matrices->Q_M_rem; Q_M1_rem = matrices->Q_M1_rem; cnt1 = cnt2 = cnt3 = cnt4 = -1; do { double r, qbt1 = 0.; unsigned int k, l, u, u1; int type; pstruc[i - 1] = '('; pstruc[j - 1] = ')'; r = 0.; ij = my_iindx[i] - j; l = INF; if (d1 == -1) { r = vrna_urn() * Q_B_rem[ij]; if (r == 0.) vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[jindx[j] + i]; u = j - i - 1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /* hairpin ? 
*/ if ((da > maxD1) || (db > maxD2)) if (!(((type == 3) || (type == 4)) && no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2]; if (qbt1 >= r) return; /* found the hairpin we're done */ /* lets see if we form an interior loop */ for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - TURN - 2); k++) { unsigned int u_pre, lmin; u1 = k - i - 1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if (u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l = lmin; l < j; l++) { int type_2; type_2 = ptype[jindx[l] + k]; if (type_2) { cnt1 = cnt2 = -1; da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l]; db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1]; if (Q_B_rem[my_iindx[k] - l] != 0.) { qbt1 += Q_B_rem[my_iindx[k] - l] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape_rem; } if (Q_B[my_iindx[k] - l]) { for (cnt1 = k_min_Q_B[my_iindx[k] - l]; cnt1 <= k_max_Q_B[my_iindx[k] - l]; cnt1++) for (cnt2 = l_min_Q_B[my_iindx[k] - l][cnt1]; cnt2 <= l_max_Q_B[my_iindx[k] - l][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape_rem; } } } } } backtrack_int_early_escape_rem: if (l < j) { i = k; j = l; d1 = cnt1; d2 = cnt2; } else { break; } } else { if ((d1 >= k_min_Q_B[ij]) && (d1 <= k_max_Q_B[ij])) if ((d2 >= l_min_Q_B[ij][d1]) && (d2 <= l_max_Q_B[ij][d1])) r = vrna_urn() * Q_B[ij][d1][d2 / 2]; if (r == 0.) vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[jindx[j] + i]; u = j - i - 1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 
1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /*hairpin contribution*/ if ((da == d1) && (db == d2)) if (!(((type == 3) || (type == 4)) && no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2]; if (qbt1 >= r) return; /* found the hairpin we're done */ for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - TURN - 2); k++) { unsigned int u_pre, lmin; u1 = k - i - 1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if (u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l = lmin; l < j; l++) { int type_2; type_2 = ptype[jindx[l] + k]; if (type_2) { da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l]; db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1]; if (d1 >= da && d2 >= db) { if ((d1 - da >= k_min_Q_B[my_iindx[k] - l]) && (d1 - da <= k_max_Q_B[my_iindx[k] - l])) { if ((d2 - db >= l_min_Q_B[my_iindx[k] - l][d1 - da]) && (d2 - db <= l_max_Q_B[my_iindx[k] - l][d1 - da])) { cnt1 = d1 - da; cnt2 = d2 - db; qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape; } } } } } } backtrack_int_early_escape: if (l < j) { i = k; j = l; d1 = cnt1; d2 = cnt2; } else { break; } } } while (1); /* backtrack in multi-loop */ { double r, qt; unsigned int k, ii, jj; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 
1 : -1; base_d1 += referenceBPs1[my_iindx[i] - j]; base_d2 += referenceBPs2[my_iindx[i] - j]; i++; j--; /* find the first split index */ ii = my_iindx[i]; /* ii-j=[i,j] */ jj = jindx[j]; /* jj+i=[j,i] */ if (d1 == -1) { /* get total contribution for current part */ for (qt = 0., k = i + 1; k < j; k++) { if (Q_M_rem[ii - k + 1] != 0.) { if (Q_M1[jj + k]) { for (cnt1 = k_min_Q_M1[jj + k]; cnt1 <= k_max_Q_M1[jj + k]; cnt1++) for (cnt2 = l_min_Q_M1[jj + k][cnt1]; cnt2 <= l_max_Q_M1[jj + k][cnt1]; cnt2 += 2) qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt1][cnt2 / 2]; } if (Q_M1_rem[jj + k] != 0.) qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k]; } if (Q_M1_rem[jj + k] != 0.) { if (Q_M[ii - k + 1]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k]; } } /* calculate introduced distance to reference structures */ if (!Q_M[ii - k + 1]) continue; if (!Q_M1[jj + k]) continue; da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; } /* throw the dice */ r = vrna_urn() * qt; for (qt = 0., k = i + 1; k < j; k++) { cnt1 = cnt2 = cnt3 = cnt4 = -1; if (Q_M_rem[ii - k + 1] != 0.) 
{ if (Q_M1_rem[jj + k] != 0) { qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k]; if (qt >= r) goto backtrack_ml_early_escape; } if (Q_M1[jj + k]) { for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) { qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } if (Q_M1_rem[jj + k] != 0.) { cnt3 = cnt4 = -1; if (Q_M[ii - k + 1]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) { qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k]; if (qt >= r) goto backtrack_ml_early_escape; } } } /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (!Q_M[ii - k + 1]) continue; if (!Q_M1[jj + k]) continue; for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } else { /* get total contribution */ for (qt = 0., k = i + 1; k < j; k++) { /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) { for (cnt1 
= k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2) if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k])) if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da])) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][d1 - da - cnt1][(d2 - db - cnt2) / 2]; } } r = vrna_urn() * qt; for (qt = 0., k = i + 1; k < j; k++) { /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2) if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k])) { if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da])) { cnt3 = d1 - da - cnt1; cnt4 = d2 - db - cnt2; qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } } } if (k >= j) vrna_message_error("backtrack failed, can't find split index "); backtrack_ml_early_escape: backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j); j = k - 1; backtrack_qm(vc, pstruc, cnt1, cnt2, i, j); } } PRIVATE void backtrack_qm1(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j) { /* i is paired to l, i<l<j; backtrack in qm1 to find l */ FLT_OR_DBL r, qt, *scale; unsigned int maxD1, maxD2, da, db; unsigned int *referenceBPs1, *referenceBPs2; char *ptype; short *S1; int *my_iindx, *jindx, cnt1, cnt2; 
/* NOTE(review): we are inside backtrack_qm1() here -- its signature and
 * opening brace sit on the previous (minified) source line.  The routine
 * picks the pairing partner l of i (i < l <= j) for a multiloop stem,
 * then recurses into backtrack() on [i,l]. */
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
vrna_mx_pf_t *matrices;

pf_params = vc->exp_params;
matrices = vc->exp_matrices;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
S1 = vc->sequence_encoding;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;

/* local aliases for the distance-class-resolved matrices and their
 * per-class index boundaries */
FLT_OR_DBL ***Q_B, ***Q_M1, *Q_B_rem, *Q_M1_rem;
int *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B;
int **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B;

Q_B = matrices->Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
Q_M1 = matrices->Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
Q_B_rem = matrices->Q_B_rem;
Q_M1_rem = matrices->Q_M1_rem;

unsigned int ii, l;
int type;

r = 0.;
cnt1 = cnt2 = -1;

/* find qm1 contribution: draw a random threshold r proportional to the
 * partition function of the distance class (d1,d2) we must resolve;
 * d1 == -1 encodes the "remainder" class (distances beyond maxD1/maxD2) */
if (d1 == -1) {
  r = vrna_urn() * Q_M1_rem[jindx[j] + i];
} else {
  if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i]))
    if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1]))
      r = vrna_urn() * Q_M1[jindx[j] + i][d1][d2 / 2];
}

if (r == 0.)
  vrna_message_error("backtrack_qm1@2Dpfold.c: backtracking failed\n");

ii = my_iindx[i];
/* accumulate candidate contributions into qt until it crosses the random
 * threshold r; the crossing candidate is the sampled decomposition */
for (qt = 0., l = i + TURN + 1; l <= j; l++) {
  type = ptype[jindx[l] + i];
  if (type) {
    /* stem (i,l) inside a multiloop, j-l unpaired multiloop bases after it */
    FLT_OR_DBL tmp = exp_E_MLstem(type, S1[i - 1], S1[l + 1], pf_params) *
                     pow(pf_params->expMLbase, j - l) * scale[j - l];
    /* compute the introduced distance to reference structures */
    da = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[i] - l];
    db = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[i] - l];
    cnt1 = cnt2 = -1;
    if (d1 == -1) {
      /* remainder class: Q_B remainder part ... */
      if (Q_B_rem[ii - l] != 0.)
      {
        qt += Q_B_rem[ii - l] * tmp;
        if (qt >= r)
          goto backtrack_qm1_early_escape;
      }
      /* ... plus all regular Q_B classes that exceed (maxD1,maxD2) once
       * the local offset (da,db) is added.  NOTE(review): cnt1/cnt2 are
       * int while da/db/maxD1/maxD2 are unsigned, so these comparisons
       * promote to unsigned -- relies on the counters being non-negative. */
      if (Q_B[ii - l]) {
        for (cnt1 = k_min_Q_B[ii - l]; cnt1 <= k_max_Q_B[ii - l]; cnt1++)
          for (cnt2 = l_min_Q_B[ii - l][cnt1]; cnt2 <= l_max_Q_B[ii - l][cnt1]; cnt2 += 2)
            if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
              qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp;
              if (qt >= r)
                goto backtrack_qm1_early_escape;
            }
      }
    } else {
      /* get energy contributions: exactly the class (d1-da, d2-db) of Q_B
       * combines with this stem to yield overall class (d1,d2) */
      if (d1 >= da && d2 >= db) {
        if ((d1 - da >= k_min_Q_B[ii - l]) && (d1 - da <= k_max_Q_B[ii - l])) {
          if ((d2 - db >= l_min_Q_B[ii - l][d1 - da]) && (d2 - db <= l_max_Q_B[ii - l][d1 - da])) {
            cnt1 = d1 - da;
            cnt2 = d2 - db;
            qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp;
            if (qt >= r)
              goto backtrack_qm1_early_escape;
          }
        }
      }
    }
  }
}
if (l > j)
  vrna_message_error("backtrack failed in qm1");
backtrack_qm1_early_escape:
/* cnt1/cnt2 hold the sampled distance class for the enclosed pair (i,l) */
backtrack(vc, pstruc, cnt1, cnt2, i, l);
}

/*
 * backtrack_qm() - stochastically split the multiloop segment [i,j] into
 * unpaired stretches, qm parts and qm1 (single-stem) parts, dispatching
 * to backtrack_qm1() for every stem found.  d1/d2 select the distance
 * class to the two reference structures; d1 == -1 selects the remainder
 * class.  (Only the header and variable setup are on these source lines;
 * the loop body follows on the next minified lines.)
 */
PRIVATE void
backtrack_qm(vrna_fold_compound_t *vc,
             char *pstruc,
             int d1,
             int d2,
             unsigned int i,
             unsigned int j)
{
  /* divide multiloop into qm and qm1 */
  FLT_OR_DBL r, *scale;
  unsigned int maxD1, maxD2, da, db, da2, db2;
  unsigned int *referenceBPs1, *referenceBPs2;
  int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4;
  vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
  vrna_mx_pf_t *matrices;

  pf_params = vc->exp_params;
  matrices = vc->exp_matrices;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  scale = matrices->scale;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;

  FLT_OR_DBL ***Q_M, ***Q_M1, *Q_M_rem, *Q_M1_rem;
  int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1;
  int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1;

  Q_M = matrices->Q_M;
  k_min_Q_M = matrices->k_min_Q_M;
  k_max_Q_M = matrices->k_max_Q_M;
  l_min_Q_M = matrices->l_min_Q_M;
  l_max_Q_M = matrices->l_max_Q_M;
  Q_M1 = matrices->Q_M1;
  k_min_Q_M1 = matrices->k_min_Q_M1;
  k_max_Q_M1 = matrices->k_max_Q_M1;
  l_min_Q_M1 = matrices->l_min_Q_M1;
  l_max_Q_M1 = matrices->l_max_Q_M1;
Q_M_rem = matrices->Q_M_rem;
  Q_M1_rem = matrices->Q_M1_rem;

  double qmt = 0;
  unsigned int k;

  cnt1 = cnt2 = cnt3 = cnt4 = -1;
  r = 0.;
  /* peel one qm1 part off the right end of [i,j] per iteration until the
   * whole segment is consumed */
  while (j > i) {
    /* now backtrack [i ... j] in qm[] */
    /* find qm contribution: random threshold proportional to the pf of the
     * distance class to resolve (d1 == -1 is the remainder class) */
    if (d1 == -1) {
      r = vrna_urn() * Q_M_rem[my_iindx[i] - j];
    } else {
      if (Q_M[my_iindx[i] - j])
        if ((d1 >= k_min_Q_M[my_iindx[i] - j]) && (d1 <= k_max_Q_M[my_iindx[i] - j]))
          if ((d2 >= l_min_Q_M[my_iindx[i] - j][d1]) && (d2 <= l_max_Q_M[my_iindx[i] - j][d1]))
            r = vrna_urn() * Q_M[my_iindx[i] - j][d1][d2 / 2];
    }

    if (r == 0.)
      vrna_message_error("backtrack_qm@2Dpfold.c: backtracking failed in finding qm contribution\n");

    qmt = 0.;
    if (d1 == -1) {
      /* remainder class: the whole of [i,j] may be a single qm1 part ... */
      if (Q_M1_rem[jindx[j] + i] != 0.) {
        qmt += Q_M1_rem[jindx[j] + i];
        if (qmt >= r) {
          backtrack_qm1(vc, pstruc, d1, d2, i, j);
          return;
        }
      }
      /* ... or split at k: [i,k-1] unpaired or qm, [k,j] qm1 */
      for (k = i + 1; k <= j; k++) {
        /* Boltzmann weight of k-i unpaired multiloop bases */
        FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i];
        /* unpaired [i,k-1] + remainder qm1 on [k,j] */
        if (Q_M1_rem[jindx[j] + k] != 0.) {
          qmt += Q_M1_rem[jindx[j] + k] * tmp;
          if (qmt >= r) {
            backtrack_qm1(vc, pstruc, d1, d2, k, j);
            return;
          }
        }
        /* distance offsets introduced by leaving [i,k-1] unpaired */
        da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j];
        db2 = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - j];
        /* unpaired [i,k-1] + regular qm1 classes that land in the remainder */
        if (Q_M1[jindx[j] + k]) {
          for (cnt1 = k_min_Q_M1[jindx[j] + k]; cnt1 <= k_max_Q_M1[jindx[j] + k]; cnt1++)
            for (cnt2 = l_min_Q_M1[jindx[j] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[j] + k][cnt1]; cnt2 += 2)
              if (((cnt1 + da2) > maxD1) || ((cnt2 + db2) > maxD2)) {
                qmt += Q_M1[jindx[j] + k][cnt1][cnt2 / 2] * tmp;
                if (qmt >= r) {
                  backtrack_qm1(vc, pstruc, cnt1, cnt2, k, j);
                  return;
                }
              }
        }
        /* offsets for the qm([i,k-1]) * qm1([k,j]) decomposition */
        da = da2 - referenceBPs1[my_iindx[i] - k + 1];
        db = db2 - referenceBPs2[my_iindx[i] - k + 1];
        cnt1 = cnt2 = cnt3 = cnt4 = -1;
        /* qm remainder combined with qm1 (remainder or any class) */
        if (Q_M_rem[my_iindx[i] - k + 1] != 0.) {
          if (Q_M1_rem[jindx[j] + k] != 0.)
          {
            qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1_rem[jindx[j] + k];
            if (qmt >= r)
              goto backtrack_qm_early_escape;
          }
          if (Q_M1[jindx[j] + k]) {
            for (cnt3 = k_min_Q_M1[jindx[j] + k]; cnt3 <= k_max_Q_M1[jindx[j] + k]; cnt3++)
              for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3]; cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3]; cnt4 += 2) {
                qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
                if (qmt >= r)
                  goto backtrack_qm_early_escape;
              }
          }
        }
        /* regular qm classes combined with qm1 remainder */
        if (Q_M1_rem[jindx[j] + k] != 0.) {
          cnt3 = cnt4 = -1;
          if (Q_M[my_iindx[i] - k + 1]) {
            for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= k_max_Q_M[my_iindx[i] - k + 1]; cnt1++)
              for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) {
                qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jindx[j] + k];
                if (qmt >= r)
                  goto backtrack_qm_early_escape;
              }
          }
        }
        /* both parts regular, but the combined class exceeds (maxD1,maxD2) */
        if (!Q_M[my_iindx[i] - k + 1])
          continue;
        if (!Q_M1[jindx[j] + k])
          continue;
        for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= k_max_Q_M[my_iindx[i] - k + 1]; cnt1++)
          for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 += 2)
            for (cnt3 = k_min_Q_M1[jindx[j] + k]; cnt3 <= k_max_Q_M1[jindx[j] + k]; cnt3++)
              for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3]; cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3]; cnt4 += 2)
                if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
                  qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
                  if (qmt >= r)
                    goto backtrack_qm_early_escape;
                }
      }
    } else {
      /* find corresponding qm1 contribution: [i,j] may itself be a qm1 part
       * in exactly class (d1,d2) */
      if (Q_M1[jindx[j] + i]) {
        if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i]))
          if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1]))
            qmt = Q_M1[jindx[j] + i][d1][d2 / 2];
      }

      k = i;
      if (qmt < r) {
        for (k = i + 1; k <= j; k++) {
          /* calculate introduced distancies to reference structures */
          da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j];
          db2 = referenceBPs2[my_iindx[i]
- j] - referenceBPs2[my_iindx[k] - j];
          da = da2 - referenceBPs1[my_iindx[i] - k + 1];
          db = db2 - referenceBPs2[my_iindx[i] - k + 1];

          FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i];

          /* collect unpaired + qm1 contributions */
          if (d1 >= da2 && d2 >= db2) {
            if ((d1 - da2 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da2 <= k_max_Q_M1[jindx[j] + k])) {
              if ((d2 - db2 >= l_min_Q_M1[jindx[j] + k][d1 - da2]) && (d2 - db2 <= l_max_Q_M1[jindx[j] + k][d1 - da2])) {
                cnt3 = d1 - da2;
                cnt4 = d2 - db2;
                qmt += Q_M1[jindx[j] + k][cnt3][cnt4 / 2] * tmp;
                if (qmt >= r) {
                  backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j);
                  return;
                }
              }
            }
          }

          /* collect qm + qm1 contributions */
          if (d1 >= da && d2 >= db && Q_M[my_iindx[i] - k + 1] && Q_M1[jindx[j] + k]) {
            for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= MIN2(k_max_Q_M[my_iindx[i] - k + 1], d1 - da); cnt1++)
              for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[my_iindx[i] - k + 1][cnt1], d2 - db); cnt2 += 2)
                if ((d1 - da - cnt1 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da - cnt1 <= k_max_Q_M1[jindx[j] + k])) {
                  if ((d2 - db - cnt2 >= l_min_Q_M1[jindx[j] + k][d1 - da - cnt1]) && (d2 - db - cnt2 <= l_max_Q_M1[jindx[j] + k][d1 - da - cnt1])) {
                    cnt3 = d1 - da - cnt1;
                    cnt4 = d2 - db - cnt2;
                    qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
                    if (qmt >= r)
                      goto backtrack_qm_early_escape;
                  }
                }
          }
        }
      } else {
        /* [i,j] is a single qm1 part (k == i) */
        backtrack_qm1(vc, pstruc, d1, d2, k, j);
        return;
      }
    }

    if (k > j)
      vrna_message_error("backtrack_qm@2Dpfold.c: backtrack failed in qm");

backtrack_qm_early_escape:
    /* (cnt3,cnt4) is the sampled class of the qm1 part [k,j] */
    backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j);

    if (k < i + TURN)
      break; /* no more pairs */

    /* continue with the remaining qm part [i,k-1] in class (cnt1,cnt2) */
    d1 = cnt1;
    d2 = cnt2;
    if (d1 == referenceBPs1[my_iindx[i] - k + 1] && d2 == referenceBPs2[my_iindx[i] - k + 1]) { /* is interval [i,k] totally unpaired?
*/ FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i]; r = vrna_urn() * (Q_M[my_iindx[i] - k + 1][d1][d2 / 2] + tmp); if (tmp >= r) return; /* no more pairs */ } j = k - 1; } } PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post) { int cnt1; int k_diff_pre = k_min_post - *k_min; int mem_size = k_max_post - k_min_post + 1; if (k_min_post < INF) { /* free all the unused memory behind actual data */ for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* free unused memory before actual data */ for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* move data to front and thereby eliminating unused memory in front of actual data */ if (k_diff_pre > 0) { memmove((FLT_OR_DBL **)(*array), ((FLT_OR_DBL **)(*array)) + k_diff_pre, sizeof(FLT_OR_DBL *) * mem_size); memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size); memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size); } /* reallocating memory to actual size used */ *array += *k_min; *array = (FLT_OR_DBL **)realloc(*array, sizeof(FLT_OR_DBL *) * mem_size); *array -= k_min_post; *l_min += *k_min; *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size); *l_min -= k_min_post; *l_max += *k_min; *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size); *l_max -= k_min_post; for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) { if (l_min_post[cnt1] < INF) { /* new memsize */ mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1; /* reshift the pointer */ (*array)[cnt1] += (*l_min)[cnt1] / 2; int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 
0 : 1;
          /* eliminate unused memory in front of actual data */
          unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift;
          if (start > 0)
            memmove((FLT_OR_DBL *)((*array)[cnt1]),
                    (FLT_OR_DBL *)((*array)[cnt1]) + start,
                    sizeof(FLT_OR_DBL) * mem_size);

          /* shrink the row to its post-computation size, then re-bias the
             pointer so it can again be indexed by l/2 starting at l_min_post */
          (*array)[cnt1] = (FLT_OR_DBL *)realloc((*array)[cnt1],
                                                 sizeof(FLT_OR_DBL) * mem_size);
          (*array)[cnt1] -= l_min_post[cnt1] / 2;
        } else {
          /* free according memory */
          /* row cnt1 holds no data at all; undo the l_min bias before free */
          (*array)[cnt1] += (*l_min)[cnt1] / 2;
          free((*array)[cnt1]);
        }

        (*l_min)[cnt1] = l_min_post[cnt1];
        (*l_max)[cnt1] = l_max_post[cnt1];
      }
  } else {
    /* we have to free all unused memory */
    /* k_min_post == INF means nothing was stored: release every row and
       the outer arrays entirely (all pointers are biased; undo first) */
    for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;
      free((*array)[cnt1]);
    }
    (*l_min) += *k_min;
    (*l_max) += *k_min;
    free(*l_min);
    free(*l_max);
    (*array) += *k_min;
    free(*array);
    *array = NULL;
  }

  /* l_min_post/l_max_post were handed in biased by -(*k_min);
     undo the bias before freeing them */
  l_min_post += *k_min;
  l_max_post += *k_min;

  *k_min = k_min_post;
  *k_max = k_max_post;

  free(l_min_post);
  free(l_max_post);
}


/* Allocate fresh posterior boundary arrays of 'size' entries, initialized to
   the empty interval (min = INF, max = 0), and bias both pointers by -shift
   so callers can index them with the same offsets as the data arrays. */
PRIVATE INLINE void
preparePosteriorBoundaries(int size,
                           int shift,
                           int *min_k,
                           int *max_k,
                           int **min_l,
                           int **max_l)
{
  int i;

  *min_k = INF;
  *max_k = 0;

  *min_l = (int *)vrna_alloc(sizeof(int) * size);
  *max_l = (int *)vrna_alloc(sizeof(int) * size);

  for (i = 0; i < size; i++) {
    (*min_l)[i] = INF;
    (*max_l)[i] = 0;
  }

  *min_l -= shift;
  *max_l -= shift;
}


/* Widen the recorded (k,l) bounding box so it includes the point (d1,d2). */
PRIVATE INLINE void
updatePosteriorBoundaries(int d1,
                          int d2,
                          int *min_k,
                          int *max_k,
                          int **min_l,
                          int **max_l)
{
  (*min_l)[d1] = MIN2((*min_l)[d1], d2);
  (*max_l)[d1] = MAX2((*max_l)[d1], d2);
  *min_k       = MIN2(*min_k, d1);
  *max_k       = MAX2(*max_k, d1);
}


/* Initialize per-k l-boundaries from the a-priori limits, raising each
   minimum l so that k + l >= bpdist and k + l has the same parity as
   bpdist (distances to the two references change in steps of 2). */
PRIVATE INLINE void
prepareBoundaries(int min_k_pre,
                  int max_k_pre,
                  int min_l_pre,
                  int max_l_pre,
                  int bpdist,
                  int *min_k,
                  int *max_k,
                  int **min_l,
                  int **max_l)
{
  int cnt;
  int mem = max_k_pre - min_k_pre + 1;

  *min_k = min_k_pre;
  *max_k = max_k_pre;

  *min_l = (int *)vrna_alloc(sizeof(int) * mem);
  *max_l = (int *)vrna_alloc(sizeof(int) * mem);

  /* bias so the arrays are indexed directly by k */
  *min_l -= min_k_pre;
  *max_l -= min_k_pre;

  /* for each k guess the according minimum l*/
  for (cnt = min_k_pre; cnt <= max_k_pre; cnt++) {
    (*min_l)[cnt] = min_l_pre;
    (*max_l)[cnt] = max_l_pre;
    while ((*min_l)[cnt] + cnt < bpdist)
      (*min_l)[cnt]++;
    if ((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2))
      (*min_l)[cnt]++;
  }
}


/* Allocate the 2D value array matching the boundaries produced above.
   Each row stores only every second l ((max-min)/2 + 1 slots) and is
   biased by -min_l[i]/2 so callers index it with l/2. */
PRIVATE INLINE void
prepareArray(FLT_OR_DBL ***array,
             int min_k,
             int max_k,
             int *min_l,
             int *max_l)
{
  int i, mem;

  *array  = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (max_k - min_k + 1));
  *array -= min_k;

  for (i = min_k; i <= max_k; i++) {
    mem          = (max_l[i] - min_l[i] + 1) / 2 + 1;
    (*array)[i]  = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * mem);
    (*array)[i] -= min_l[i] / 2;
  }
}


/*
 #################################
 # DEPRECATED FUNCTIONS BELOW    #
 #################################
 */

/* Mirror the fields of the new vrna_fold_compound_t API into the legacy
   TwoDpfold_vars struct so old callers keep seeing consistent data. */
PRIVATE void
crosslink(TwoDpfold_vars *vars)
{
  vrna_fold_compound_t  *c;
  vrna_mx_pf_t          *m;

  c = vars->compatibility;
  m = c->exp_matrices;

  /* sequence / reference structure data */
  vars->sequence       = c->sequence;
  vars->seq_length     = c->length;
  vars->reference_pt1  = c->reference_pt1;
  vars->reference_pt2  = c->reference_pt2;
  vars->referenceBPs1  = c->referenceBPs1;
  vars->referenceBPs2  = c->referenceBPs2;
  vars->mm1            = c->mm1;
  vars->mm2            = c->mm2;
  vars->bpdist         = c->bpdist;

  /* model / energy parameter settings */
  vars->dangles        = c->exp_params->model_details.dangles;
  vars->circ           = c->exp_params->model_details.circ;
  vars->temperature    = c->exp_params->model_details.temperature;
  vars->init_temp      = c->exp_params->model_details.temperature;
  vars->pf_scale       = c->exp_params->pf_scale;
  vars->pf_params      = c->exp_params;
  vars->scale          = m->scale;

  vars->ptype          = c->ptype_pf_compat;
  vars->S              = c->sequence_encoding2;
  vars->S1             = c->sequence_encoding;
  vars->jindx          = c->jindx;
  vars->my_iindx       = c->iindx;
  vars->maxD1          = c->maxD1;
  vars->maxD2          = c->maxD2;

  /* partition function matrices and their (k,l) boundaries */
  vars->Q              = m->Q;
  vars->l_min_values   = m->l_min_Q;
  vars->l_max_values   = m->l_max_Q;
  vars->k_min_values   = m->k_min_Q;
  vars->k_max_values   = m->k_max_Q;

  vars->Q_B            = m->Q_B;
  vars->l_min_values_b = m->l_min_Q_B;
  vars->l_max_values_b = m->l_max_Q_B;
  vars->k_min_values_b = m->k_min_Q_B;
  vars->k_max_values_b = m->k_max_Q_B;

  vars->Q_M            = m->Q_M;
  vars->l_min_values_m = m->l_min_Q_M;
  vars->l_max_values_m = m->l_max_Q_M;
  vars->k_min_values_m = m->k_min_Q_M;
  vars->k_max_values_m = m->k_max_Q_M;

  vars->Q_M1            = m->Q_M1;
  vars->l_min_values_m1 = m->l_min_Q_M1;
  vars->l_max_values_m1 = m->l_max_Q_M1;
  vars->k_min_values_m1 = m->k_min_Q_M1;
  vars->k_max_values_m1 = m->k_max_Q_M1;

  vars->Q_M2_rem        = m->Q_M2_rem;
  vars->Q_M2            = m->Q_M2;
  vars->l_min_values_m2 = m->l_min_Q_M2;
  vars->l_max_values_m2 = m->l_max_Q_M2;
  vars->k_min_values_m2 = m->k_min_Q_M2;
  vars->k_max_values_m2 = m->k_max_Q_M2;

  /* circular-fold matrices and "remainder" entries */
  vars->Q_c     = m->Q_c;
  vars->Q_cH    = m->Q_cH;
  vars->Q_cI    = m->Q_cI;
  vars->Q_cM    = m->Q_cM;
  vars->Q_c_rem   = m->Q_c_rem;
  vars->Q_cH_rem  = m->Q_cH_rem;
  vars->Q_cI_rem  = m->Q_cI_rem;
  vars->Q_cM_rem  = m->Q_cM_rem;

  vars->Q_rem     = m->Q_rem;
  vars->Q_B_rem   = m->Q_B_rem;
  vars->Q_M_rem   = m->Q_M_rem;
  vars->Q_M1_rem  = m->Q_M1_rem;
}


/* Deprecated wrapper: forwards to vrna_pbacktrack_TwoD(). */
PUBLIC char *
TwoDpfold_pbacktrack(TwoDpfold_vars *vars,
                     int d1,
                     int d2)
{
  return vrna_pbacktrack_TwoD(vars->compatibility, d1, d2);
}


/* Deprecated wrapper: forwards to vrna_pbacktrack5_TwoD() for a prefix
   of the given length. */
PUBLIC char *
TwoDpfold_pbacktrack5(TwoDpfold_vars *vars,
                      int d1,
                      int d2,
                      unsigned int length)
{
  return vrna_pbacktrack5_TwoD(vars->compatibility, d1, d2, length);
}


/* Deprecated constructor: builds a vrna_fold_compound_TwoD() and mirrors
   its fields into the legacy vars struct.
   NOTE(review): plain malloc() here, unlike vrna_alloc() elsewhere —
   result is not checked for NULL before use; confirm intended. */
PUBLIC TwoDpfold_vars *
get_TwoDpfold_variables(const char  *seq,
                        const char  *structure1,
                        char        *structure2,
                        int         circ)
{
  vrna_md_t       md;
  TwoDpfold_vars  *vars;

  set_model_details(&md);
  md.circ = circ;

  vars = (TwoDpfold_vars *)malloc(sizeof(TwoDpfold_vars));

  vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2,
                                                &md, VRNA_OPTION_PF);

  crosslink(vars);

  return vars;
}


/* Deprecated destructor: frees the fold compound and the wrapper struct.
   (The crosslinked pointers are owned by the compound; only it is freed.) */
PUBLIC void
destroy_TwoDpfold_variables(TwoDpfold_vars *vars)
{
  if (vars == NULL)
    return;

  vrna_fold_compound_free(vars->compatibility);

  free(vars);
}


/* Deprecated wrapper: runs vrna_pf_TwoD() and refreshes the crosslinked
   matrix pointers (the computation may have (re)allocated them). */
vrna_sol_TwoD_pf_t *
TwoDpfoldList(TwoDpfold_vars  *vars,
              int             distance1,
              int             distance2)
{
  vrna_sol_TwoD_pf_t *sol;

  sol = vrna_pf_TwoD(vars->compatibility, distance1, distance2);
  crosslink(vars);

  return sol;
}
LAGraph_pagerank3b.c
//------------------------------------------------------------------------------ // LAGraph_pagerank3b: pagerank using a real semiring //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ // LAGraph_pagerank3b: Alternative PageRank implementation using a real // semiring. // // This algorithm follows the specification given in the GAP Benchmark Suite: // https://arxiv.org/abs/1508.03619 // For fastest results, the input matrix should be GrB_FP32, stored in // GxB_BY_COL format. #include "LAGraph.h" #define LAGRAPH_FREE_ALL { \ GrB_free(&transpose_desc); \ GrB_free(&invmask_desc); \ GrB_free(&A); \ GrB_free(&G); \ GrB_free(&grb_d_out); \ GrB_free(&importance_vec); \ GrB_free(&grb_pr); \ }; // uncomment this to see the intermidiate resluts; lots of prints!! 
//#undef NDEBUG

// uncomment this to see the timing info
#define PRINT_TIMING_INFO

//------------------------------------------------------------------------------
// LAGraph_pagerank3b: PageRank via GrB_mxv plus hand-rolled OpenMP loops.
//
// Strategy visible below: a row of explicit zeros is added to A so the
// reduction produces a full out-degree vector; the PR vector is repeatedly
// exported to a dense float array (GxB_Vector_export), scaled/teleported in
// OpenMP loops, imported back (GxB_Vector_import), and multiplied by A'
// until the 1-norm of the change drops below tol or itermax is reached.
//
// result : on success, *result receives the final rank vector (caller frees)
// A_input: n-by-n graph matrix, not modified
// damping_factor: typically 0.85; teleport = (1-df)/n
// itermax: upper bound on iterations
// iters  : actual number of iterations performed
//------------------------------------------------------------------------------

GrB_Info LAGraph_pagerank3b // PageRank definition
(
    GrB_Vector *result,     // output: array of LAGraph_PageRank structs
    GrB_Matrix A_input,     // binary input graph, not modified
    float damping_factor,   // damping factor
    unsigned long itermax,  // maximum number of iterations
    int* iters              // output: number of iterations taken
)
{
    GrB_Info info;
    GrB_Index n;
    GrB_Descriptor invmask_desc = NULL ;
    GrB_Descriptor transpose_desc = NULL ;
    GrB_Vector grb_d_out = NULL ;
    GrB_Matrix A = NULL ;

#ifdef PRINT_TIMING_INFO
    // start the timer
    double tic [2] ;
    LAGraph_tic (tic) ;
#endif

    GrB_Vector importance_vec = NULL ;
    GrB_Vector grb_pr = NULL;
    GrB_Matrix G = NULL ;       // a dense row of zeros zeroes(1,n)

    GrB_Index ncols ;           //number of columnns
    LAGRAPH_OK(GrB_Matrix_ncols(&ncols , A_input));
    LAGRAPH_OK(GrB_Matrix_nrows(&n, A_input));
    GrB_Index nvals;
    LAGRAPH_OK(GrB_Matrix_nvals(&nvals, A_input));
    if (ncols != n)
    {
        return (GrB_DIMENSION_MISMATCH) ;
    }

    LAGRAPH_OK(GrB_Matrix_new (&G, GrB_FP32, n, n));
    LAGRAPH_OK(GrB_Matrix_new (&A, GrB_FP32, n, n));
    LAGRAPH_OK(GxB_set (A, GxB_FORMAT, GxB_BY_COL));

    // G is zeros in last row
    // (explicit zeros so the eWiseAdd below leaves an entry in every column)
    for (GrB_Index c = 0; c < n; c++){
        LAGRAPH_OK(GrB_Matrix_setElement (G, 0.0, n-1, c));
    }

#ifndef NDEBUG
    int print_size = 5; //number of entries get printed
    print_size = (print_size > n)? n : print_size;
    // GxB_print (G, 3) ;
#endif

    // A = A_input + G;
    LAGRAPH_OK(GrB_eWiseAdd (A, NULL, NULL, GrB_PLUS_FP32, A_input, G, NULL));
    GrB_free (&G) ;

#ifndef NDEBUG
    // GxB_print (A, 3) ;
#endif

    // Create complement descriptor
    // NOTE(review): invmask_desc is created but never used below — confirm
    // whether it can be removed (it is still freed by LAGRAPH_FREE_ALL).
    LAGRAPH_OK(GrB_Descriptor_new(&invmask_desc));
    LAGRAPH_OK(GrB_Descriptor_set(invmask_desc, GrB_MASK, GrB_SCMP));

    // Create transpose descriptor
    LAGRAPH_OK(GrB_Descriptor_new(&transpose_desc));
    LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_INP0, GrB_TRAN));
    LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_OUTP, GrB_REPLACE));

    // Matrix A row sum
    // Stores the outbound degrees of all vertices
    LAGRAPH_OK(GrB_Vector_new(&grb_d_out, GrB_FP32, n));
    LAGRAPH_OK(GrB_reduce( grb_d_out, NULL, NULL, GxB_PLUS_FP32_MONOID, A, NULL ));

#ifndef NDEBUG
    GxB_print (grb_d_out, 1) ;
    // GxB_print (A, 3) ;
#endif

    // Iteration
    // Initialize PR vector
    LAGRAPH_OK(GrB_Vector_new(&grb_pr, GrB_FP32, n));
    LAGRAPH_OK(GrB_Vector_new(&importance_vec, GrB_FP32, n));

    // Teleport value
    const float teleport = (1 - damping_factor) / n;

    float tol = 1e-4;
    float rdiff = 1 ;       // first iteration is always done

    GrB_Type type = GrB_FP32 ;
    GrB_Index *dI = NULL ;
    float *d_sp= NULL ;
    GrB_Index d_nvals;
    GrB_Index d_n;

    // d_sp <----- grb_d_out || export
    // (export hands ownership of dI/d_sp to us and frees the vector shell)
    LAGRAPH_OK (GxB_Vector_export (&grb_d_out, &type, &d_n, &d_nvals,
        &dI, (void **) (&d_sp), NULL)) ;

    // dens d_out
    // NOTE(review): d_out is never freed before return — looks like a leak.
    float *d_out = (float *) LAGraph_calloc (n, sizeof(float));

    int nthreads = LAGraph_get_nthreads ( ) ;
    nthreads = LAGRAPH_MIN (n , nthreads) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t i = 0 ; i < d_nvals; i++){
        GrB_Index ind = (GrB_Index) dI[i];
        d_out [ind] = d_sp [i];
    }
    free (d_sp);
    free (dI);

#ifndef NDEBUG
    for (int i = 0 ; i < print_size; i++){
        // NOTE(review): "%ld" with a float argument is undefined behavior;
        // debug-only path, but the specifier should be "%f" — confirm.
        printf("d_out [%d]=%ld\n", i, d_out [i]);
    }
#endif

    // initializing pr
    // NOTE(review): loop indices below are int while n is GrB_Index —
    // overflows for n > INT_MAX; confirm acceptable for target graph sizes.
    float *pr = (float *) malloc (n*sizeof(float));
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int i = 0; i < n ; i++){
        pr [i] = 1.0/n;
    }

#ifndef NDEBUG
    for (int i = 0 ; i < print_size ; i++){
        printf("pr[%d]=%f\n", i, pr [i]);
    }
#endif

    float *oldpr = (float *) malloc (n*sizeof(float));

    //initailze the dense indices
    GrB_Index *I = LAGraph_malloc(n, sizeof(GrB_Index));
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (GrB_Index j = 0; j < n; j++){
        I[j] = j;
    }

#ifdef PRINT_TIMING_INFO
    // stop the timer
    double t1 = LAGraph_toc (tic);
    printf ("\ninitialization time: %12.6e (sec)\n",t1);
    LAGraph_tic (tic);
#endif

    for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++)
    {
        // oldpr = pr; deep copy
        //GrB_Vector_dup(&oldpr, pr);
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int i = 0; i < n ; i++){
            oldpr [i] = pr [i];
        }

        // Importance calculation
        // pr [i] = df * pr [i] / outdegree (dangling nodes contribute 0)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int i = 0 ; i < n; i++){
            if (d_out [i] != 0){
                pr [i] = damping_factor * pr [i] / d_out [i];
            }
            else{
                pr [i] = 0;
            }
        }

#ifndef NDEBUG
        for (int i = 0 ; i < print_size; i++){
            printf (" pr [%d] = %f\n", i, pr [i]);
        }
#endif

        // importance_vec <----- pr
        // (import takes ownership of I and pr for the duration of the vector)
        LAGRAPH_OK (GxB_Vector_import (&importance_vec, GrB_FP32, n, n,
            &I, (void **) (&pr), NULL)) ;

#ifndef NDEBUG
        printf ("after importance_vec import\n");
        GxB_print (importance_vec, 2) ;
#endif

        // Calculate total PR of all inbound vertices
        // importance_vec = A' * importance_vec
        LAGRAPH_OK(GrB_mxv( importance_vec, NULL, NULL,
            GxB_PLUS_TIMES_FP32, A, importance_vec, transpose_desc ));

#ifndef NDEBUG
        printf ("==============2\n");
        printf ("after mxv\n");
        GxB_print (importance_vec, 1) ;
#endif

        GrB_Index nvals_exp;

        // pr <----- importance_vec
        GrB_Type ivtype;
        LAGRAPH_OK (GxB_Vector_export (&importance_vec, &ivtype, &n,
            &nvals_exp, &I, (void **) (&pr), NULL)) ;
        // assert (nvals_exp == n );

        // PageRank summarization
        // Add teleport, importance_vec, and dangling_vec components together
        // pr = (1-df)/n + pr
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int i = 0 ; i < n; i++){
            pr [i] += teleport;
        }

#ifndef NDEBUG
        for (int i = 0 ; i < print_size; i++){
            printf (" pr [%d] = %f\n", i, pr [i]);
        }
#endif

        //----------------------------------------------------------------------
        // rdiff = sum ((oldpr-pr).^2)
        //----------------------------------------------------------------------
        rdiff = 0;      // norm (oldpr pr, 1)
        #pragma omp parallel for num_threads(nthreads) reduction(+:rdiff)
        for (int i = 0 ; i < n; i++){
            float d = (oldpr [i] - pr [i]);
            d = (d > 0 ? d : -d);   //abs(d)
            rdiff += d;
        }

#ifndef NDEBUG
        printf("---------------------------iters %d rdiff=%f\n",*iters, rdiff);
#endif
    }

#ifdef PRINT_TIMING_INFO
    // stop the timer
    double t2 = LAGraph_toc (tic);
    printf ("compuatatin time: %12.6e (sec) ratio (comp/init): %f\n\n",
        t2, t2/t1);
#endif

    // NOTE(review): prI is allocated but never used or freed — memory leak;
    // it appears to be dead code left from an earlier version. Confirm.
    GrB_Index *prI = LAGraph_malloc(n, sizeof(GrB_Index));

    // grb_pr<----- pr || import back
    // (ownership of I and pr transfers into grb_pr, which *result now owns)
    LAGRAPH_OK (GxB_Vector_import (&grb_pr, GrB_FP32, n, n,
        &I, (void **) (&pr), NULL)) ;

    (*result) = grb_pr;

    free(I);
    free (oldpr);
    return (GrB_SUCCESS);
}
GB_unop__identity_uint8_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint8_uint16
// op(A') function:  GB_unop_tran__identity_uint8_uint16

// C type:   uint8_t
// A type:   uint16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting (uint16_t value narrowed to uint8_t; high bits are discarded)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over anz entries; each iteration touches a single index,
// so the loop is safely parallelized with a static OpenMP schedule.

GrB_Info GB_unop_apply__identity_uint8_uint16
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        uint8_t z = (uint8_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unop_transpose.c,
// specialized here via the GB_* macros defined above.

GrB_Info GB_unop_tran__identity_uint8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_loop.h
// -*- C++ -*- // Copyright (C) 2007-2015 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/omp_loop.h * @brief Parallelization of embarrassingly parallel execution by * means of an OpenMP for loop. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H #define _GLIBCXX_PARALLEL_OMP_LOOP_H 1 #include <omp.h> #include <parallel/settings.h> #include <parallel/basic_iterator.h> #include <parallel/base.h> namespace __gnu_parallel { /** @brief Embarrassingly parallel algorithm for random access * iterators, using an OpenMP for loop. * * @param __begin Begin iterator of element sequence. * @param __end End iterator of element sequence. * @param __o User-supplied functor (comparator, predicate, adding * functor, etc.). * @param __f Functor to @a process an element with __op (depends on * desired functionality, e. g. for std::for_each(), ...). 
   * @param __r Functor to @a add a single __result to the already
   * processed elements (depends on functionality).
   *  @param __base Base value for reduction.
   *  @param __output Pointer to position where final result is written to
   *  @param __bound Maximum number of elements processed (e. g. for
   *  std::count_n()).
   *  @return User-supplied functor (that may contain a part of the result).
   */
  template<typename _RAIter,
	   typename _Op,
	   typename _Fu,
	   typename _Red,
	   typename _Result>
    _Op
    __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
					       _Op __o, _Fu& __f, _Red __r,
					       _Result __base,
					       _Result& __output,
      typename std::iterator_traits<_RAIter>::difference_type __bound)
    {
      typedef typename std::iterator_traits<_RAIter>::difference_type
	_DifferenceType;

      _DifferenceType __length = __end - __begin;
      // Clamp the thread count to the element count so no thread is idle.
      _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
	(__get_max_threads(), __length);

      // One partial-reduction slot per thread; allocated inside the region
      // (in the single block) once the real team size is known.
      _Result *__thread_results;

#     pragma omp parallel num_threads(__num_threads)
      {
	// The single construct's implicit barrier publishes
	// __thread_results and the updated __num_threads to all threads.
#       pragma omp single
	{
	  __num_threads = omp_get_num_threads();
	  __thread_results = new _Result[__num_threads];

	  for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
	    __thread_results[__i] = _Result();
	}

	_ThreadIndex __iam = omp_get_thread_num();

	// Dynamic schedule: each thread folds the elements it grabs into
	// its own slot; no synchronization needed on __thread_results.
	// NOTE(review): raw new[]/delete[] — __thread_results leaks if
	// __f or __r throws inside the parallel region; presumably the
	// functors are nothrow here, but confirm.
#       pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
	for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
	  __thread_results[__iam] = __r(__thread_results[__iam],
					__f(__o, __begin+__pos));
      } //parallel

      // Sequentially combine the per-thread partial results.
      for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
	__output = __r(__output, __thread_results[__i]);

      delete [] __thread_results;

      // Points to last element processed (needed as return value for
      // some algorithms like transform).
      __f._M_finish_iterator = __begin + __length;

      return __o;
    }

} // end namespace

#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
ellipticSEMFEMSetup.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#include "elliptic.h"

// Vertex record carrying 2D coordinates plus local/global ids, used for
// geometric matching of FEM nodes via qsort.
typedef struct{

  dfloat VX;
  dfloat VY;

  dlong localId;
  hlong globalId;

}FEMverts_t;

// Node record for parallel global-numbering exchanges.
typedef struct {

  dlong localId;
  hlong globalId;
  int ownerRank;

}parallelNode_t;

// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);

// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);

// compare xy coordinates
// Orders FEMverts_t lexicographically by (VX, VY) with a NODETOL fuzz band
// so nearly-coincident vertices compare equal.
// NOTE(review): tolerance-based comparison is not a strict weak ordering
// (a==b and b==c do not imply a==c), so qsort results can depend on input
// order for borderline coordinates — presumably acceptable here; confirm.
int parallelCompareFEMvertsLocation(const void *a, const void *b){

  dfloat NODETOL = 1e-6;

  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->VX < fb->VX - NODETOL) return -1;
  if(fa->VX > fb->VX + NODETOL) return +1;

  if(fa->VY < fb->VY - NODETOL) return -1;
  if(fa->VY > fb->VY + NODETOL) return +1;

  return 0;

}

// compare local id
int parallelCompareFEMvertsLocalId(const void *a, const void *b){

  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->localId < fb->localId) return -1;
  if(fa->localId > fb->localId) return +1;

  return 0;

}

int parallelCompareRowColumn(const void *a, const void *b);

// Per-element-type assembly of the FEM stiffness/mass matrix entries into
// the COO triplet buffer A (implementations live elsewhere).
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);

// Build the SEMFEM preconditioner: project the SEM discretization onto a
// degree-1 FEM mesh and assemble its operator (continues below).
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon,
                              dfloat lambda) {

  setupAide options = elliptic->options;

  // SEMFEM requires the continuous (C0) discretization; abort otherwise.
  if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
    printf("SEMFEM is supported for CONTINUOUS only\n");
    MPI_Barrier(elliptic->mesh->comm);
    MPI_Finalize();
    exit(0);
  }

  mesh_t* mesh = elliptic->mesh; //original mesh
//original mesh // mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //partially assembled fem mesh (result of projecting sem element to larger space) mesh_t* pmesh = new mesh_t[1]; // precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //full fem mesh precon->femMesh = new mesh_t[1]; mesh_t *femMesh = precon->femMesh; memcpy(pmesh ,mesh,sizeof(mesh_t)); memcpy(femMesh,mesh,sizeof(mesh_t)); if (elliptic->elementType==TRIANGLES) { //set semfem nodes as the grid points pmesh->Np = mesh->NpFEM; pmesh->r = mesh->rFEM; pmesh->s = mesh->sFEM; //count number of face nodes in the semfem element dfloat NODETOL = 1e-6; pmesh->Nfp=0; for (int n=0;n<pmesh->Np;n++) if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++; //remake the faceNodes array pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int)); int f0=0, f1=0, f2=0; for (int n=0;n<pmesh->Np;n++) { if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n; if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n; if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n; } //remake vertexNodes array pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int)); for(int n=0;n<pmesh->Np;++n){ if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL) pmesh->vertexNodes[0] = n; if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL) pmesh->vertexNodes[1] = n; if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL) pmesh->vertexNodes[2] = n; } // connect elements using parallel sort meshParallelConnect(pmesh); // compute physical (x,y) locations of the element nodes meshPhysicalNodesTri2D(pmesh); // free(sendBuffer); meshHaloSetup(pmesh); // connect face nodes (find trace indices) meshConnectFaceNodes2D(pmesh); // global nodes meshParallelConnectNodes(pmesh); //pmesh->globalIds is now populated } else if (elliptic->elementType==TETRAHEDRA) { //set semfem nodes as the grid points pmesh->Np = 
mesh->NpFEM; pmesh->r = mesh->rFEM; pmesh->s = mesh->sFEM; pmesh->t = mesh->tFEM; //count number of face nodes in the semfem element dfloat NODETOL = 1e-6; pmesh->Nfp=0; for (int n=0;n<pmesh->Np;n++) if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++; //remake the faceNodes array pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int)); int f0=0, f1=0, f2=0, f3=0; for (int n=0;n<pmesh->Np;n++) { if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n; if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n; if (fabs(pmesh->r[n]+pmesh->s[n]+ pmesh->t[n]+1.0)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n; if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[3*pmesh->Nfp+f3++] = n; } //remake vertexNodes array pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int)); for(int n=0;n<pmesh->Np;++n){ if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL) pmesh->vertexNodes[0] = n; if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL) pmesh->vertexNodes[1] = n; if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL) pmesh->vertexNodes[2] = n; if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL) pmesh->vertexNodes[3] = n; } // connect elements using parallel sort meshParallelConnect(pmesh); // compute physical (x,y) locations of the element nodes meshPhysicalNodesTet3D(pmesh); // free(sendBuffer); meshHaloSetup(pmesh); // connect face nodes (find trace indices) meshConnectFaceNodes3D(pmesh); // global nodes meshParallelConnectNodes(pmesh); //pmesh->globalIds is now populated } //now build the full degree 1 fem mesh int femN = 1; //degree of fem approximation /* allocate space for node coordinates */ femMesh->Nelements = mesh->NelFEM*mesh->Nelements; femMesh->EToV = (hlong*) 
calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong)); femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat)); femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat)); if (elliptic->dim==3) femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat)); dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong)); // dlong NFEMverts = mesh->Nelements*mesh->NpFEM; for(dlong e=0;e<mesh->Nelements;++e){ for (int n=0;n<mesh->NelFEM;n++) { dlong id[femMesh->Nverts]; dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts; for (int i=0;i<femMesh->Nverts;i++) { //local ids in the subelement fem grid id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i]; /* read vertex triplet for triangle */ femMesh->EToV[femId+i] = pmesh->globalIds[id[i]]; femMesh->EX[femId+i] = pmesh->x[id[i]]; femMesh->EY[femId+i] = pmesh->y[id[i]]; if (elliptic->dim==3) femMesh->EZ[femId+i] = pmesh->z[id[i]]; } switch(elliptic->elementType){ case TRIANGLES: localIds[femId+0] = id[0]; localIds[femId+1] = id[1]; localIds[femId+2] = id[2]; break; case QUADRILATERALS: localIds[femId+0] = id[0]; localIds[femId+1] = id[1]; localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element localIds[femId+3] = id[2]; break; case TETRAHEDRA: localIds[femId+0] = id[0]; localIds[femId+1] = id[1]; localIds[femId+2] = id[2]; localIds[femId+3] = id[3]; break; case HEXAHEDRA: localIds[femId+0] = id[0]; localIds[femId+1] = id[1]; localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element localIds[femId+3] = id[2]; localIds[femId+4] = id[4]; localIds[femId+5] = id[5]; localIds[femId+6] = id[7]; localIds[femId+7] = id[6]; break; } } } // connect elements using parallel sort meshParallelConnect(femMesh); switch(elliptic->elementType){ case TRIANGLES: meshLoadReferenceNodesTri2D(femMesh, femN); break; case QUADRILATERALS: 
meshLoadReferenceNodesQuad2D(femMesh, femN); break; case TETRAHEDRA: meshLoadReferenceNodesTet3D(femMesh, femN); break; case HEXAHEDRA: meshLoadReferenceNodesHex3D(femMesh, femN); break; } int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int)); for (int f=0;f<pmesh->Nfaces;f++) { for (int n=0;n<pmesh->Nfp;n++) { int id = pmesh->faceNodes[f*pmesh->Nfp+n]; faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face } } //map from faces of fem sub-elements to the macro element face number int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int)); for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1; for (int n=0;n<mesh->NelFEM;n++) { for (int f=0;f<femMesh->Nfaces;f++) { for (int face=0; face<pmesh->Nfaces;face++) { //count the nodes on this face which are on a macro face int NvertsOnFace = 0; for (int i=0;i<femMesh->Nfp;i++){ int id = femMesh->faceNodes[f*femMesh->Nfp+i]; int v = mesh->FEMEToV[n*pmesh->Nverts+id]; NvertsOnFace += faceFlag[face*pmesh->Np + v]; } if (NvertsOnFace == femMesh->Nfp) femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face } } } //fill the boundary flag array femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int)); for (dlong e=0;e<mesh->Nelements;e++) { for (int n=0;n<mesh->NelFEM;n++) { for (int f=0;f<femMesh->Nfaces;f++) { int face = femFaceMap[n*femMesh->Nfaces+f]; if (face>-1) { femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face]; } } } } free(faceFlag); free(femFaceMap); switch(elliptic->elementType){ case TRIANGLES: meshPhysicalNodesTri2D(femMesh); meshGeometricFactorsTri2D(femMesh); meshHaloSetup(femMesh); meshConnectFaceNodes2D(femMesh); meshSurfaceGeometricFactorsTri2D(femMesh); break; case QUADRILATERALS: meshPhysicalNodesQuad2D(femMesh); meshGeometricFactorsQuad2D(femMesh); meshHaloSetup(femMesh); meshConnectFaceNodes2D(femMesh); meshSurfaceGeometricFactorsQuad2D(femMesh); break; case TETRAHEDRA: 
meshPhysicalNodesTet3D(femMesh); meshGeometricFactorsTet3D(femMesh); meshHaloSetup(femMesh); meshConnectFaceNodes3D(femMesh); meshSurfaceGeometricFactorsTet3D(femMesh); break; case HEXAHEDRA: meshPhysicalNodesHex3D(femMesh); meshGeometricFactorsHex3D(femMesh); meshHaloSetup(femMesh); meshConnectFaceNodes3D(femMesh); meshSurfaceGeometricFactorsHex3D(femMesh); break; } // global nodes meshParallelConnectNodes(femMesh); dlong Ntotal = pmesh->Np*pmesh->Nelements; int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0; pmesh->maskedGlobalIds = (hlong *) calloc(Ntotal,sizeof(hlong)); memcpy(pmesh->maskedGlobalIds, pmesh->globalIds, Ntotal*sizeof(hlong)); if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) { //build a new mask for NpFEM>Np node sets // gather-scatter pmesh->ogs = ogsSetup(Ntotal, pmesh->globalIds, mesh->comm, verbose, mesh->device); //make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann) int *mapB = (int *) calloc(Ntotal,sizeof(int)); for (dlong e=0;e<pmesh->Nelements;e++) { for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9; for (int f=0;f<pmesh->Nfaces;f++) { int bc = pmesh->EToB[f+e*pmesh->Nfaces]; if (bc>0) { for (int n=0;n<pmesh->Nfp;n++) { int BCFlag = elliptic->BCType[bc]; int fid = pmesh->faceNodes[n+f*pmesh->Nfp]; mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]); } } } } ogsGatherScatter(mapB, ogsInt, ogsMin, pmesh->ogs); //use the bc flags to find masked ids for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) { if (mapB[n] == 1) { //Dirichlet boundary pmesh->maskedGlobalIds[n] = 0; } } free(mapB); } else { //mask using the original mask for (dlong n=0;n<elliptic->Nmasked;n++) pmesh->maskedGlobalIds[elliptic->maskIds[n]] = 0; } //build masked gs handle precon->FEMogs = ogsSetup(Ntotal, pmesh->maskedGlobalIds, mesh->comm, verbose, mesh->device); // number of degrees of freedom on this rank (after gathering) hlong Ngather = precon->FEMogs->Ngather; // create a global numbering 
system hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); int *owner = (int *) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong)); MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); for(int r=0;r<mesh->size;++r) globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; //use the offsets to set a consecutive global numbering for (dlong n =0;n<precon->FEMogs->Ngather;n++) { globalIds[n] = n + globalStarts[mesh->rank]; owner[n] = mesh->rank; } //scatter this numbering to the original nodes hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, precon->FEMogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, precon->FEMogs); free(globalIds); free(owner); if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) { //dont need these anymore free(pmesh->vmapM); free(pmesh->vmapP); free(pmesh->mapP); //maybe more cleanup can go here } if (elliptic->elementType==TRIANGLES) { //build stiffness matrices femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); for (int n=0;n<femMesh->Np;n++) { for (int m=0;m<femMesh->Np;m++) { for (int k=0;k<femMesh->Np;k++) { for (int l=0;l<femMesh->Np;l++) { femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np]; femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np]; femMesh->Ssr[m+n*femMesh->Np] += 
femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np]; femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np]; } } } } } else if (elliptic->elementType==TETRAHEDRA) { //build stiffness matrices femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat)); for (int n=0;n<femMesh->Np;n++) { for (int m=0;m<femMesh->Np;m++) { for (int k=0;k<femMesh->Np;k++) { for (int l=0;l<femMesh->Np;l++) { femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np]; femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np]; femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np]; femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np]; femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np]; femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np]; femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np]; femMesh->Sts[m+n*femMesh->Np] += 
femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np]; femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np]; } } } } } if (mesh->rank==0) printf("Building full SEMFEM matrix..."); fflush(stdout); // Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements; dlong cnt =0; nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); //Build unassembed non-zeros switch(elliptic->elementType){ case TRIANGLES: BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break; case QUADRILATERALS: BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break; case TETRAHEDRA: BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break; case HEXAHEDRA: BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break; } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr+0); MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n=0;n<cnt;++n) 
AsendCounts[sendNonZeros[n].ownerRank]++; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather dlong nnz = 0; for(int r=0;r<mesh->size;++r){ AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; nnz += ArecvCounts[r]; } nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t)); // determine number to receive MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong n=1;n<nnz;++n){ if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){ A[cnt].val += A[n].val; } else{ ++cnt; A[cnt] = A[n]; } } if (nnz) cnt++; nnz = cnt; if(mesh->rank==0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong)); hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong)); dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat)); for (dlong n=0;n<nnz;n++) { Rows[n] = A[n].row; Cols[n] = A[n].col; Vals[n] = A[n].val; } free(A); precon->parAlmond = parAlmond::Init(mesh->device, mesh->comm, options); parAlmond::AMGSetup(precon->parAlmond, globalStarts, nnz, Rows, Cols, Vals, elliptic->allNeumann, elliptic->allNeumannPenalty); free(Rows); free(Cols); free(Vals); if (options.compareArgs("VERBOSE", "TRUE")) parAlmond::Report(precon->parAlmond); if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) { // //tell parAlmond not to gather this level (its done manually) // agmgLevel *baseLevel = precon->parAlmond->levels[0]; // baseLevel->gatherLevel = false; // baseLevel->weightedInnerProds = 
false; // build interp and anterp dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat)); for(int n=0;n<mesh->NpFEM;++n){ for(int m=0;m<mesh->Np;++m){ SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m]; } } mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp); mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp); free(SEMFEMAnterp); precon->o_rFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat)); precon->o_zFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat)); precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat)); precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat)); } else { // //tell parAlmond to gather this level // agmgLevel *baseLevel = precon->parAlmond->levels[0]; // baseLevel->gatherLevel = true; parAlmond::multigridLevel *baseLevel = precon->parAlmond->levels[0]; precon->rhsG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat)); precon->xG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat)); precon->o_rhsG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat)); precon->o_xG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat)); // baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat)); // baseLevel->Sx = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat)); // baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat)); // baseLevel->o_Sx = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat)); // baseLevel->weightedInnerProds = false; // baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*)); // baseLevel->gatherArgs[0] = (void *) elliptic; // baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid // baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx); // baseLevel->scatterArgs = baseLevel->gatherArgs; // baseLevel->device_gather = 
ellipticGather; // baseLevel->device_scatter = ellipticScatter;
  }
}

// Assemble the unassembled (COO) stiffness + mass matrix contributions for the
// triangular FEM sub-elements. Each entry val = grad(phi_n).G.grad(phi_m) + lambda*J*MM
// is appended to A under a critical section (cnt is shared across threads).
// NOTE(review): pmesh is unused here — kept for a uniform BuildFEMMatrix* signature.
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;

        // affine triangle: geometric factors are constant per element
        dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
        dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
        dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
        dfloat J = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

        // stiffness: contract reference stiffness blocks with the metric
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        // mass contribution scaled by the Helmholtz parameter
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        // drop numerically-zero entries
        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Assemble COO entries for quadrilateral sub-elements. Quads are not affine,
// so geometric factors are stored per node and the tensor-product structure of
// the 1D derivative matrix D is exploited (sum over 1D quadrature index k).
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int ny=0;ny<femMesh->Nq;ny++) {
      for (int nx=0;nx<femMesh->Nq;nx++) {
        dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
        if (globalNumbering[idn]<0) continue; //skip masked nodes
        for (int my=0;my<femMesh->Nq;my++) {
          for (int mx=0;mx<femMesh->Nq;mx++) {
            dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
            if (globalNumbering[idm]<0) continue; //skip masked nodes

            int id;
            dfloat val = 0.;

            // rr term: only couples nodes on the same y-line
            if (ny==my) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = k+ny*femMesh->Nq;
                dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
              }
            }

            // rs cross term evaluated at node (mx,ny)
            id = mx+ny*femMesh->Nq;
            dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

            // sr cross term evaluated at node (nx,my)
            id = nx+my*femMesh->Nq;
            dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];

            // ss term: only couples nodes on the same x-line
            if (nx==mx) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = nx+k*femMesh->Nq;
                dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
              }
            }

            // diagonal mass-lumped Helmholtz contribution
            if ((nx==mx)&&(ny==my)) {
              id = nx + ny*femMesh->Nq;
              dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
              val += JW*lambda;
            }

            dfloat nonZeroThreshold = 1e-7;
            if (fabs(val)>nonZeroThreshold) {
              #pragma omp critical
              {
                // pack non-zero
                A[*cnt].val = val;
                A[*cnt].row = globalNumbering[idn];
                A[*cnt].col = globalNumbering[idm];
                A[*cnt].ownerRank = globalOwners[idn];
                (*cnt)++;
              }
            }
          }
        }
      }
    }
  }
}

// Assemble COO entries for tetrahedral sub-elements; like the triangle case,
// tets are affine so the 3x3 symmetric metric (Grr..Gtt) is constant per element.
// The off-diagonal blocks reuse Grs/Grt/Gst because the metric is symmetric.
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
    dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
    dfloat Grt = femMesh->ggeo[e*femMesh->Nggeo + G02ID];
    dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
    dfloat Gst = femMesh->ggeo[e*femMesh->Nggeo + G12ID];
    dfloat Gtt = femMesh->ggeo[e*femMesh->Nggeo + G22ID];
    dfloat J = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grt*femMesh->Srt[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += Gst*femMesh->Sst[m+n*femMesh->Np];
        val += Grt*femMesh->Str[m+n*femMesh->Np];
        val += Gst*femMesh->Sts[m+n*femMesh->Np];
        val += Gtt*femMesh->Stt[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Assemble COO entries for hexahedral sub-elements: full tensor-product form
// with per-node geometric factors; each metric block only couples nodes that
// share the appropriate 1D line(s) of the Nq x Nq x Nq node lattice.
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int nz=0;nz<femMesh->Nq;nz++) {
      for (int ny=0;ny<femMesh->Nq;ny++) {
        for (int nx=0;nx<femMesh->Nq;nx++) {
          dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
          dlong idn = localIds[e*femMesh->Np + nn];
          if (globalNumbering[idn]<0) continue; //skip masked nodes
          for (int mz=0;mz<femMesh->Nq;mz++) {
            for (int my=0;my<femMesh->Nq;my++) {
              for (int mx=0;mx<femMesh->Nq;mx++) {
                dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                dlong idm = localIds[e*femMesh->Np + mm];
                if (globalNumbering[idm]<0) continue; //skip masked nodes

                int id;
                dfloat val = 0.;

                // rr term: same y- and z-line
                if ((ny==my)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                    val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
                  }
                }

                // rs/sr cross terms: same z-slab
                if (nz==mz) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
                }

                // rt/tr cross terms: same y-slab
                if (ny==my) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];

                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // ss term: same x- and z-line
                if ((nx==mx)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                    val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
                  }
                }

                // st/ts cross terms: same x-slab
                if (nx==mx) {
                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];

                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // tt term: same x- and y-line
                if ((nx==mx)&&(ny==my)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq;
                    dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np];
                    val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq];
                  }
                }

                // diagonal Helmholtz contribution
                if ((nx==mx)&&(ny==my)&&(nz==mz)) {
                  id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
                  val += JW*lambda;
                }

                // pack non-zero
                // NOTE(review): this builder uses >= where Tri2D/Quad2D/Tet3D use > —
                // likely an accidental inconsistency (harmless for this threshold); confirm upstream.
                dfloat nonZeroThreshold = 1e-7;
                if (fabs(val) >= nonZeroThreshold) {
                  #pragma omp critical
                  {
                    A[*cnt].val = val;
                    A[*cnt].row = globalNumbering[idn];
                    A[*cnt].col = globalNumbering[idm];
                    A[*cnt].ownerRank = globalOwners[idn];
                    (*cnt)++;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
GB_unop__identity_int64_int8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int64_int8)
// op(A') function:  GB (_unop_tran__identity_int64_int8)

// C type:   int64_t
// A type:   int8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the typecast input)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int8_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = (int64_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int64_int8)
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int64_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion, driven by the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB112-linear-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* omp for loop is allowed to use the linear clause, an OpenMP 4.5 addition. */ #include <stdio.h> #include <omp.h> int main() { int len = 100; double a[len]; double b[len]; double c[len]; int i; int j = 0; #pragma omp parallel for private (i) for (i = 0; i <= len - 1; i += 1) { a[i] = ((double )i) / 2.0; b[i] = ((double )i) / 3.0; c[i] = ((double )i) / 7.0; } for (i = 0; i <= len - 1; i += 1) { c[j] += a[i] * b[i]; j++; } printf("c[50]=%f\n",c[50]); return 0; }
mlp_example_bf16_amx_numa.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas, Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <libxsmm_sync.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #include <numa.h> /* include c-based dnn library */ #include "../common/dnn_common.h" #define CHECK_L1 #define OVERWRITE_DOUTPUT_BWDUPD #define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16)) #define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)_mm512_cvtneps_pbh((B))) LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < (int)size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) { int i; zero_buf_bf16(buf, size); for (i = 0; i < (int)size; ++i) { libxsmm_bfloat16_hp tmp; tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? 
libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
    buf[i] = tmp.i[1]; /* high 16-bit word of the fp32 pattern = bf16 truncation (assumes little-endian union layout — confirm) */
  }
}

#if 0
/* Currently unused: repack a KCCK-blocked fp32 weight tensor into VNNI layout
   (pairs of C-rows interleaved along the innermost dimension). */
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk) {
  int k1, k2, c1, c2;
  int kBlocks = K/bk;
  int cBlocks = C/bc;
  LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk);
  LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2);

  for (k1 = 0; k1 < kBlocks; k1++) {
    for (c1 = 0; c1 < cBlocks; c1++) {
      for (c2 = 0; c2 < bc; c2++) {
        for (k2 = 0; k2 < bk; k2++) {
          LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) =
            LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
        }
      }
    }
  }
}
#endif

/* Optional element-wise fusions for the FC forward pass; BIAS and RELU are bit flags. */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* Pass selector; MY_PASS_BWD combines the data (2) and weight (4) backward passes. */
typedef enum my_pass {
  MY_PASS_FWD   = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD   = 6
} my_pass;

/* SGD optimizer configuration (blocked C x K weight update). */
typedef struct my_opt_config {
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  float lr;                    /* learning rate */
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_opt_config;

/* Softmax forward configuration. */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_smax_fwd_config;

/* Softmax backward configuration. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight;
  libxsmm_barrier* barrier;
} my_smax_bwd_config;

/* Fully-connected forward configuration: problem/block sizes, parallelization
   strategy, and the JIT-dispatched BRGEMM/eltwise kernels for each fusion variant. */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;           /* blocking factor over input-feature blocks */
  libxsmm_blasint fwd_2d_blocking;
  libxsmm_blasint fwd_col_teams;
  libxsmm_blasint fwd_row_teams;
  size_t scratch_size;
  libxsmm_barrier* barrier;
  libxsmm_bsmmfunction fwd_config_kernel;   /* AMX tile-config kernel */
  libxsmm_bsmmfunction tilerelease_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8;
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_zero_kernel;
  libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel;
  libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel;
} my_fc_fwd_config;

/* Fully-connected backward (data + weight-update) configuration. */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf;
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint upd_bf;
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint ifm_subtasks;
  libxsmm_blasint ofm_subtasks;
  size_t scratch_size;
  size_t doutput_scratch_mark;
  libxsmm_barrier* barrier;
  libxsmm_bsmmfunction bwd_config_kernel;
  libxsmm_bsmmfunction upd_config_kernel;
  libxsmm_bsmmfunction tilerelease_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_upd3;
  libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_cvtfp32bf16 upd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary
bwd_relu_kernel; libxsmm_meltwfunction_unary bwd_zero_kernel; libxsmm_meltwfunction_unary upd_zero_kernel; libxsmm_meltwfunction_unary delbias_reduce_kernel; libxsmm_meltwfunction_unary vnni_to_vnniT_kernel; libxsmm_meltwfunction_unary norm_to_normT_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel; } my_fc_bwd_config; typedef struct my_numa_thr_cfg { int thr_s; int thr_e; int *blocksOFm_s; int *blocksOFm_e; int *blocksIFm_s; int *blocksIFm_e; libxsmm_bfloat16 **scratch; size_t *layer_size; libxsmm_bfloat16 **bwd_d_scratch; size_t *bwd_d_layer_size; libxsmm_bfloat16 **bwd_w_scratch; size_t *bwd_w_layer_size; } my_numa_thr_cfg; my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_fwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero = bk*bn; libxsmm_blasint ld_upconvert = K; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; libxsmm_meltw_flags fusion_flags; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); libxsmm_blasint unroll_hint; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ if (threads == 16) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 8; } else { res.fwd_bf = 1; res.fwd_2d_blocking = 0; res.fwd_col_teams = 1; res.fwd_row_teams = 1; } #if 0 res.fwd_bf = atoi(getenv("FWD_BF")); res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING")); res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS")); res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ l_flags = ( 
LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
  l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  unroll_hint = (res.C/res.bc)/res.fwd_bf;
  /* tile-config kernel: only programs the tile state, performs no FLOPs */
  res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
  if ( res.fwd_config_kernel == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* beta=1 BRGEMM accumulating into the f32 intermediate output */
  res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
  if ( res.gemm_fwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  /* beta=0 BRGEMM with f32 output */
  res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_fwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* beta=0 BRGEMM with bf16 output */
  res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_fwd3 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n");
    exit(-1);
  }
  /* fused BRGEMM variants: colbias and/or activation applied in the GEMM epilogue */
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C;
  res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if ( res.gemm_fwd4 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C;
  res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if ( res.gemm_fwd5 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C;
  res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if ( res.gemm_fwd6 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C;
  res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if ( res.gemm_fwd7 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C;
  res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if ( res.gemm_fwd8 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n");
    exit(-1);
  }
  /* Also JIT eltwise TPPs... */
  res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.fwd_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* f32 -> bf16 downconvert fused with ReLU; BITMASK keeps the mask for the bwd pass */
  res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU);
  if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID);
  if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
  if ( res.tilerelease_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if ( res.fwd_zero_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* broadcasts the bf16 bias row over all bn rows while upconverting to f32 */
  res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY );
  if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.fwd_copy_bf16fp32_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. Bailing...!\n");
    exit(-1);
  }

  /* init scratch */
  res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K));

  return res;
}

/* Creates the backward/update-pass configuration: stores sizes, picks the
 * bwd-d and bwd-w parallelization strategies, creates the barrier and JITs
 * all BRGEMM/eltwise/transpose TPPs used by my_fc_bwd_exec().
 * Exits on JIT failure. */
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
                                 libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_bwd_config res;
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  libxsmm_blasint ld_zero_bwd = bc*bn;
  libxsmm_blasint ld_zero_upd = bk;
  libxsmm_blasint delbias_K = K;
  libxsmm_blasint delbias_N = N;
  float alpha = 1.0f;
  float beta = 1.0f;
  float zerobeta = 0.0f;
  libxsmm_blasint updM;
  libxsmm_blasint updN;
  int l_flags, l_tc_flags;
  int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  libxsmm_blasint unroll_hint;
  size_t size_bwd_scratch;
  size_t size_upd_scratch;
  libxsmm_blasint bbk;
  libxsmm_blasint bbc;
  libxsmm_blasint ldaT = bc;
  libxsmm_blasint ldb_orig= bc;

  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;

  /* setup parallelization strategy (2D blocking for bwd-d only tuned for 16 threads) */
  if (threads == 16) {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 1;
    res.bwd_col_teams = 2;
    res.bwd_row_teams = 8;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  } else {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 0;
    res.bwd_col_teams = 1;
    res.bwd_row_teams = 1;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  }
  /* sub-block sizes for the weight-update kernels when subtasking is enabled */
  bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks;
  bbc = (res.upd_2d_blocking == 1) ?
bc : bc/res.ifm_subtasks;
#if 0
  res.bwd_bf = atoi(getenv("BWD_BF"));
  res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
  res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
  res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
  res.upd_bf = atoi(getenv("UPD_BF"));
  res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
  res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
  res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
  res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
  res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* TPP creation */
  /* BWD GEMM */
  l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
  l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  unroll_hint = (res.K/res.bk)/res.bwd_bf;
  /* beta=1 BRGEMM accumulating into the f32 dinput buffer */
  res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL);
  if ( res.gemm_bwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
    exit(-1);
  }
  /* beta=0 BRGEMM with f32 output */
  res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_bwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* beta=0 BRGEMM with bf16 output */
  res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_bwd3 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n");
    exit(-1);
  }
  res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL);
  if ( res.bwd_config_kernel == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Also JIT eltwise TPPs... */
  res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.bwd_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* inverse ReLU applied to the incoming gradient via the fwd bitmask */
  res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn,&ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV);
  if ( res.bwd_relu_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if ( res.bwd_zero_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* JITing the tranpose kernel */
  res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT);
  if ( res.vnni_to_vnniT_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* UPD GEMM */
  lda = res.bk;
  ldb = res.bn;
  ldc = res.bk;
  updM = res.bk/res.ofm_subtasks;
  updN = res.bc/res.ifm_subtasks;
  l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
  l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  unroll_hint = (res.N/res.bn)/res.upd_bf;
  res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
  if ( res.gemm_upd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_upd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* weight gradient is emitted directly in VNNI layout */
  l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C;
  res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_upd3 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n");
    exit(-1);
  }
  res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
  if ( res.upd_config_kernel == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
  if ( res.tilerelease_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Also JIT eltwise TPPs... */
  res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_cvtfp32bf16(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVT_VNNI_FORMAT);
  if ( res.upd_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if ( res.upd_zero_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* reduces the gradient over the minibatch to obtain the bias gradient */
  res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT);
  if( res.delbias_reduce_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* JITing the tranpose kernels */
  res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
  if ( res.norm_to_vnni_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
  if ( res.norm_to_normT_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n");
    exit(-1);
  }

  /* init scratch: the larger of the bwd-d and bwd-w requirements, plus
   * space for (one or two copies of) the output gradient */
  size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
  size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
#ifdef OVERWRITE_DOUTPUT_BWDUPD
  res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
#else
  res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + 2 * sizeof(libxsmm_bfloat16) * res.N * res.K;
#endif
  res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) ;

  return res;
}

/* Creates the optimizer (weight-update) configuration; no TPPs or scratch needed. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) {
  my_opt_config res;

  /* setting up some handle values */
  res.C = C;
  res.K = K;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.lr = lr;

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* init scratch */
  res.scratch_size = 0;

  return res;
}

/* Creates the softmax forward configuration (scratch holds two f32 copies of the activations). */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) {
  my_smax_fwd_config res;

  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* init scratch */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);;

  return res;
}

/* Creates the softmax backward configuration; loss_weight scales the loss gradient. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config res;

  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  res.loss_weight = loss_weight;

  /* setting up the barrier */
  res.barrier =
libxsmm_barrier_create(threads, 1);

  /* init scratch */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);;

  return res;
}

/* Executes the forward pass of the fully-connected layer for one thread:
 * out = act(in * wt + bias) with optional fused bias/ReLU/sigmoid per
 * cfg.fuse_type.  wt_ptr holds bf16 weights in VNNI layout; relu_ptr
 * receives the ReLU bitmask when ReLU fusion is enabled; scratch provides
 * the f32 intermediate buffer.  numa_thr_cfg/layer give the NUMA-local
 * weight-block offset (ofm_start) used to index the weight copy.
 * All threads of the team must call this (barrier-synchronized). */
void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr,
                     libxsmm_bfloat16* out_act_ptr, const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr,
                     int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = cfg.bc/lpb;
  /* const libxsmm_blasint bc = cfg.bc;*/
  libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could be run in parallel */
  const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;

  /* loop variables */
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
  libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;

  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
  LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
  LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk);
  libxsmm_meltw_gemm_param gemm_eltwise_params;
  /* per-thread f32 upconverted bias, only when bias fusion is requested */
  float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL;
  LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk);
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);

  libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltw_unary_param eltwise_params_act;
  libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
  libxsmm_meltw_unary_param eltwise_params;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
  libxsmm_meltw_unary_param copy_params;

  unsigned long long blocks = nBlocksIFm;
  libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;

  /* pick the fused BRGEMM flavor matching the requested fusion */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
  } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
  } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
  } else {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL;
  }

  BF = cfg.fwd_bf;
  CB_BLOCKS = nBlocksIFm/BF;
  blocks = CB_BLOCKS;

  if (use_2d_blocking == 1) {
    col_teams = cfg.fwd_col_teams;
    row_teams = cfg.fwd_row_teams;
    my_row_id = ltid % row_teams;
    my_col_id = ltid / row_teams;
    N_tasks_per_thread = (nBlocksMB + col_teams-1)/col_teams;
    M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams;
    my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksMB);
    my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksMB);
    my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm);
    my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
  }

  /* first OFm block of this layer held in the NUMA-local weight copy;
   * the filter VLA below is indexed relative to this offset */
  const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer];

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  cfg.fwd_config_kernel(NULL, NULL, NULL);

  if (use_2d_blocking == 1) {
    if (BF > 1) {
      /* blocked over IFm: initialize (bias or zero), accumulate, then convert/activate on last step */
      for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
        for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            if ( ifm1 == 0 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
                copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
                cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
              } else {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                cfg.fwd_zero_kernel(&copy_params);
              }
            }
            cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                          &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                          &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
            if ( ifm1 == BF-1 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
                eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
                eltwise_kernel_act(&eltwise_params_act);
              } else {
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_kernel(&eltwise_params);
              }
            }
          }
        }
      }
    } else {
      /* single-shot: upconvert the bias once per thread, then fused BRGEMM straight to bf16 */
      if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
        copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
        copy_params.out.primary = fp32_bias_scratch;
        cfg.fwd_copy_bf16fp32_kernel(&copy_params);
      }
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
              gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
            }
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
              gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
            }
            bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
          } else {
            cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
          }
        }
      }
    }
  } else {
    if (BF > 1) {
      for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
        for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
          mb1 = mb1ofm1%nBlocksMB;
          ofm1 = mb1ofm1/nBlocksMB;
          /* Initialize intermediate f32 tensor */
          if ( ifm1 == 0 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
              copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
              cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
            } else {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_zero_kernel(&copy_params);
            }
          }
          cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
              &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
              &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          if ( ifm1 == BF-1 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
              eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
              eltwise_kernel_act(&eltwise_params_act);
            } else {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      }
    } else {
      if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
        copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
        copy_params.out.primary = fp32_bias_scratch;
        cfg.fwd_copy_bf16fp32_kernel(&copy_params);
      }
      for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 = mb1ofm1/nBlocksMB;
        if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
          }
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
          }
          bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
              &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
              &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
        } else {
          cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
              &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
              &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
        }
      }
    }
  }

  cfg.tilerelease_kernel(NULL, NULL, NULL);
  libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* Executes the backward (bwd-d) and/or weight-update (bwd-w) pass for one
 * thread, selected via 'pass'.  Applies the inverse ReLU (via the fwd
 * bitmask) and the bias-gradient reduction first when fused, then the
 * requested GEMM passes.  Barrier-synchronized across the thread team. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, const libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr,
    const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr,
    libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch ) {
  /* size variables, all const */
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = bc/lpb;
  const libxsmm_blasint bk_lp = bk/lpb;
  const libxsmm_blasint bn_lp = bn/lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
  libxsmm_blasint performed_doutput_transpose = 0;
  libxsmm_meltw_unary_param trans_param;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
  const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ?
((ltid + 1) * eltwise_chunksize) : eltwise_work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint dbias_work = nBlocksOFm; /* compute chunk size */ const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work; const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work; LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk); LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32); #ifdef OVERWRITE_DOUTPUT_BWDUPD libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr; libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch; #else libxsmm_bfloat16 *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)dout_act_ptr; libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? 
(libxsmm_bfloat16*)grad_output_ptr + cfg.N * cfg.K : (libxsmm_bfloat16*)scratch; #endif LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk); libxsmm_meltw_unary_param relu_params; libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel; LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb); libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_cvtfp32bf16 eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel; libxsmm_meltw_unary_param eltwise_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); cfg.bwd_config_kernel(NULL, NULL, NULL); /* Apply to doutput potential fusions */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1/nBlocksOFm; ofm1 = mb1ofm1%nBlocksOFm; relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); relu_kernel(&relu_params); /* If in UPD pass, also perform transpose of doutput */ if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { performed_doutput_transpose = 1; } libxsmm_barrier_wait(cfg.barrier, ltid); } /* Accumulation of bias happens in f32 */ if 
(((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){ libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm; /* compute chunk size */ const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work; const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? 
((ltid + 1) * transpose_chunksize) : transpose_work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb); float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2; LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm/BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { col_teams = cfg.bwd_col_teams; row_teams = cfg.bwd_row_teams; my_row_id = ltid % row_teams; my_col_id = ltid / row_teams; N_tasks_per_thread = (nBlocksMB + col_teams-1)/col_teams; M_tasks_per_thread = (nBlocksIFm + row_teams-1)/row_teams; my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksMB); my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksMB); my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksIFm); my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksIFm); } /* transpose weight */ for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / nBlocksIFm; ifm1 = ifm1ofm1 % nBlocksIFm; trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } /* wait for transpose to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); if (use_2d_blocking == 
1) { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { /* Initialize libxsmm_blasintermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); } } } } } else { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } else { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; /* Initialize libxsmm_blasintermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* 
downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); } } } } else { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? 
(ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, mb1ifm1 = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB/BF; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb); /* Set up tensors for transposing/scratch before vnni reformatting dfilter */ libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K); float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk); const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm; const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1); const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm; const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1); const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work; const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? 
((ltid + 1) * tr_inp_chunksize) : tr_inp_work; if (use_2d_blocking == 1) { col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; my_row_id = ltid % row_teams; my_col_id = ltid / row_teams; N_tasks_per_thread = (nBlocksIFm + col_teams-1)/col_teams; M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams; my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksIFm); my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksIFm); my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm); my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm); } /* Required upfront tranposes */ for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } if (performed_doutput_transpose == 0) { for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); if (use_2d_blocking == 1) { ifm2 = 0; ofm2 = 0; if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = 
my_N_start; ifm1 < my_N_end; ++ifm1) { /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF-1) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); } } } } } } else { if (BF == 1) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); } } else { for (bfn = 0; bfn < BF; bfn++) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, 
/* ---- continuation of my_fc_bwd_exec (definition opens before this chunk) ----
 * Tail of the cfg.gemm_upd() batch-reduce GEMM call that accumulates the
 * weight gradient into the f32 scratch tensor dfilter_f32. */
ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb),
              &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn),
              &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
            /* Downconvert result to BF16 and vnni format */
            if (bfn == BF-1) {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
              eltwise_kernel2(&eltwise_params);
            }
          }
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  cfg.tilerelease_kernel(NULL, NULL, NULL);
}

/*
 * my_opt_exec: plain SGD weight update for one fully-connected layer.
 *
 * Each thread owns a contiguous chunk of the C*K weight elements and computes
 *   master_wt[i] -= lr * delwt[i]   (f32 master copy)
 *   wt[i]         = bf16(master_wt[i])
 * i.e. the gradient (bf16) is applied to the f32 master weights and the
 * updated value is stored back both as f32 master and truncated bf16 working
 * copy. "scratch" is unused here.
 */
void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr,
                  const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) {
  /* loop counters */
  libxsmm_blasint i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the filters */
  const libxsmm_blasint work = cfg.C * cfg.K;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
#if defined(__AVX512BW__)
  libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */
  __m512 vlr = _mm512_set1_ps( cfg.lr );
  /* vector path: 16 weights per iteration.
   * NOTE(review): _mm512_load_fil/_mm512_store_fil are project-local macros,
   * presumably bf16 load-with-upconvert / store-with-downconvert -- confirm. */
  for ( i = thr_begin; i < thr_begin+iv; i+=16 ) {
    __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) );
    _mm512_store_fil( wt_ptr+i, newfilter );
    _mm512_storeu_ps( master_wt_ptr+i, newfilter );
  }
  /* scalar remainder: widen bf16 grad by placing it in the high 16 bits of an f32 */
  for ( i = thr_begin+iv; i < thr_end; ++i ) {
    libxsmm_bfloat16_hp t1, t2;
    t1.i[0] =0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1]; /* truncating f32 -> bf16 (keeps upper 16 bits, no rounding) */
  }
#else
  /* pure scalar fallback, same arithmetic as the remainder loop above */
  for ( i = thr_begin; i < thr_end; ++i ) {
    libxsmm_bfloat16_hp t1, t2;
    t1.i[0] =0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1];
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
}

/*
 * my_smax_fwd_exec: softmax forward + cross-entropy loss over bf16 activations.
 * Uses the f32 "scratch" area (output copy at offset 0, input copy at N*C) and
 * partitions the N = Bn*bn samples across cfg.threads threads.
 * (body continues on the following lines)
 */
void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr,
                       const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ?
((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16* poutput_bf16 = out_act_ptr; const libxsmm_bfloat16* pinput_bf16 = in_act_ptr; float* poutput_fp32 = (float*)scratch; float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { float max = FLT_MIN; float sum_of_exp = 0.0f; img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) { max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } } /* sum exp over outputs */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) ); sum_of_exp += 
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } /* scale output */ sum_of_exp = 1.0f/sum_of_exp; for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp; } } } libxsmm_barrier_wait( cfg.barrier, ltid ); /* calculate loss single threaded */ if ( ltid == 0 ) { (*loss) = 0.0f; for ( img1 = 0; img1 < Bn; ++img1 ) { for ( img2 = 0; img2 <bn; ++img2 ) { libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ); libxsmm_blasint ifm1b = ifm/bc; libxsmm_blasint ifm2b = ifm%bc; float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss = LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? 
(n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const int nc_work = Bn * bn; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16* poutput_bf16 = out_act_ptr; libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr; float* poutput_fp32 = (float*)scratch; float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*Bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, 
/* (continuation of my_smax_bwd_exec: completes the non-label gradient entry,
 * then converts the f32 gradient tensor back to bf16) */
bn, bc ) * rcp_N * cfg.loss_weight;
        }
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* downconvert the f32 gradient back to bf16 (truncation) */
  for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
    libxsmm_bfloat16_hp in;
    in.f = pdinput_fp32[i];
    pdinput_bf16[i] = in.i[1];
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
}

/* Allocate "size" bytes of memory on NUMA node "numa_node".
 * The manual-alignment implementation is compiled out (#if 0); currently this
 * is a thin wrapper around numa_alloc_onnode() and "alignment_" is ignored
 * (numa_alloc_onnode is page-granular anyway). */
void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) {
#if 0
  int alignment = alignment_ - 1;
  size_t adj_size = sizeof(size_t) + alignment;
  void *r_ptr = NULL;
  void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node);
  if (t_ptr == NULL) return NULL;
  r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment);
  *((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr;
  return r_ptr;
#else
  return numa_alloc_onnode(size, numa_node);
#endif
}

/* Counterpart of numa_alloc_onnode_aligned(); while the aligned path above is
 * disabled this is a plain numa_free(). */
void numa_free_aligned(void *ptr, size_t size) {
#if 0
  if (ptr == NULL) return;
  void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1));
  numa_free(t_ptr, size);
#else
  numa_free(ptr, size);
#endif
}

/* setup_my_numa: build the per-NUMA-node thread configuration.
 * For every configured node, allocates the per-layer bookkeeping arrays and
 * records the first (thr_s) and last (thr_e) worker-thread id whose CPU belongs
 * to that node. Returns 1; *numa_thr_cfg_ receives the malloc'd array.
 * NOTE(review): this assumes worker thread t is pinned to CPU t (it indexes the
 * node's CPU bitmask with a global thread counter) -- confirm affinity setup.
 * NOTE(review): for a node contributing a single thread, thr_e is never
 * assigned (the first loop breaks before the second loop can see another set
 * bit); downstream loops guard with "thr_s != thr_e", which may rely on that --
 * confirm. malloc()/numa_bitmask_alloc() results are unchecked and bmask is
 * never freed. */
int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) {
  int max_nodes = numa_max_node() + 1;
  int max_cfg_nodes = numa_num_configured_nodes();
  int max_cfg_cpus = numa_num_configured_cpus();
  int max_task_cpus = numa_num_task_cpus();
  my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * max_cfg_nodes);
  printf("FWD NUMA configuration:\n");
  printf("There are %d numa nodes on the system\n", max_nodes);
  printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
  printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
  printf("There are %d CPUs asigned for the current task\n", max_task_cpus);
  struct bitmask* bmask = numa_bitmask_alloc(max_cfg_cpus);
  int thr_count = 0, i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    /* bmask now holds the CPUs belonging to node i */
    numa_node_to_cpus(i, bmask);
    numa_thr_cfg[i].scratch = (libxsmm_bfloat16**) malloc(sizeof(libxsmm_bfloat16*) * num_layers);
    numa_thr_cfg[i].layer_size = (size_t*)malloc(sizeof(size_t)*num_layers);
    numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
    /* printf("@@@@@ node %d size %zd cpus ", i, bmask->size); size_t j = 0; for(j = 0; j < bmask->size; j++) printf("%d", numa_bitmask_isbitset(bmask, j)); printf("\n"); */
    /* count how many CPUs of this node are available */
    int num_threads_in_mask = 0;
    int t = 0;
    for (t = 0; t < bmask->size; t++)
      if (numa_bitmask_isbitset(bmask, t))
        num_threads_in_mask++;
    int node_threads = 0;
    /* advance thr_count to the first thread whose CPU is on this node */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count)) {
        numa_thr_cfg[i].thr_s = thr_count;
        break;
      }
      thr_count++;
      node_threads++;
    }
    /* keep advancing, remembering the last matching thread as thr_e */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count))
        numa_thr_cfg[i].thr_e = thr_count;
      thr_count++;
      node_threads++;
    }
  }
  *numa_thr_cfg_ = numa_thr_cfg;
  return 1;
}

/* setup_my_numa_fwd: for every node and layer, compute the range of output-
 * feature-map blocks [blocksOFm_s, blocksOFm_e] touched by that node's threads
 * during the forward pass, mirroring the 2D-blocking / flat work partitioning
 * used in the fwd kernel. Returns 1 on success, -1 for unsupported configs.
 * (body continues on the following lines) */
int setup_my_numa_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    int l = 0;
    for (l = 0; l < num_layers; l++) {
      /* fwd_bf > 1 (blocking over reduction dim) is not supported here */
      if (my_fc_fwd[l].fwd_bf > 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      int thr = 0;
      const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
      const libxsmm_blasint nBlocksMB = my_fc_fwd[l].N / my_fc_fwd[l].bn;
      if (my_fc_fwd[l].fwd_2d_blocking == 1) {
        libxsmm_blasint row_teams = my_fc_fwd[l].fwd_row_teams;
        libxsmm_blasint M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
        /* start with an empty (inverted) range, then widen it per thread */
        numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
        numa_thr_cfg[i].blocksOFm_e[l] = 0;
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e && numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
          libxsmm_blasint my_row_id = thr % row_teams; /* ltid */
          libxsmm_blasint my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
          libxsmm_blasint my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
          numa_thr_cfg[i].blocksOFm_s[l]
/* (continuation of setup_my_numa_fwd: fold this thread's [my_M_start,my_M_end)
 * OFm-block range into the node's running min/max) */
= (my_M_start <= numa_thr_cfg[i].blocksOFm_s[l]) ? my_M_start : numa_thr_cfg[i].blocksOFm_s[l];
          numa_thr_cfg[i].blocksOFm_e[l] = (my_M_end >= numa_thr_cfg[i].blocksOFm_e[l]) ? my_M_end : numa_thr_cfg[i].blocksOFm_e[l];
        }
      } else {
        /* flat 1D partitioning: replicate the fwd kernel's chunking to find the
         * OFm blocks each of this node's threads will touch */
        numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
        numa_thr_cfg[i].blocksOFm_e[l] = 0;
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e && numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
          const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
          const libxsmm_blasint chunksize = (work % my_fc_fwd[l].threads == 0) ? (work / my_fc_fwd[l].threads) : ((work / my_fc_fwd[l].threads) + 1);
          const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
          const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
          int ofm_s = thr_begin / nBlocksMB;
          int ofm_e = thr_end / nBlocksMB;
          numa_thr_cfg[i].blocksOFm_s[l] = (ofm_s <= numa_thr_cfg[i].blocksOFm_s[l]) ? ofm_s : numa_thr_cfg[i].blocksOFm_s[l];
          numa_thr_cfg[i].blocksOFm_e[l] = (ofm_e >= numa_thr_cfg[i].blocksOFm_e[l]) ? ofm_e : numa_thr_cfg[i].blocksOFm_e[l];
        }
      }
    }
  }
  return 1;
}

/* allocate_numa_buffers_fwd: allocate, on each NUMA node, a per-layer weight
 * replica covering only the OFm-block range [blocksOFm_s, blocksOFm_e] that the
 * node's threads use in the forward pass (ranges computed by setup_my_numa_fwd).
 * Returns 1 on success, -1 if a node allocation fails. */
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0, l = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      /* elements of one OFm block of the filter: all IFm blocks * bc * bk */
      const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      if (l_nBlocksOFm <= 0) continue; /* node owns no OFm blocks of this layer */
      numa_thr_cfg[i].layer_size[l] = sizeof(libxsmm_bfloat16) * ((l_nBlocksOFm) * BOFM_shift);
      /* 2 MiB alignment requested; currently ignored by the wrapper */
      numa_thr_cfg[i].scratch[l] = (libxsmm_bfloat16*)numa_alloc_onnode_aligned(numa_thr_cfg[i].layer_size[l], i, 2097152);
      if (numa_thr_cfg[i].scratch[l] == NULL) {
        printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
        return -1;
      }
    }
  }
  return 1;
}

/* copy_to_numa_buffers_fwd_inf: populate each node's per-layer weight replica
 * by copying, block by block, the node's OFm-block slice out of the global
 * filter tensor fil_libxsmm[l]. Parallelized over (node, layer) with OpenMP.
 * Always returns 1. */
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, libxsmm_bfloat16 **fil_libxsmm) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i, l;
#pragma omp parallel for collapse(2) private (i,l)
  for (i = 0; i < max_cfg_nodes; i++) {
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      int j = 0;
      for (j = 0; j < l_nBlocksOFm ; j++) {
        size_t l_BOFM_shift = j * BOFM_shift;
        libxsmm_bfloat16 *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
        /* source: block (blocksOFm_s + j) of the global filter */
        libxsmm_bfloat16 *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
        memcpy(out, inp, sizeof(libxsmm_bfloat16) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
      }
    }
  }
  return 1;
}

int main(int argc, char* argv[]) {
libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config* my_fc_fwd; my_fc_bwd_config* my_fc_bwd; my_opt_config* my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; void* scratch = NULL; size_t scratch_size = 0; #ifdef CHECK_L1 float *last_act_fwd_f32 = NULL; float *first_wt_bwdupd_f32 = NULL; #endif /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.2f; float loss_weight = 0.1f; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 9; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) fuse_type = atoi(argv[i++]); if (argc > i) type = *(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channles buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; if (type != 'A' && type != 'F' && type != 'B') { printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n"); return -1; } if ( (fuse_type < 0) || (fuse_type > 5) ) { printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n"); return -1; } #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i ) { if (i == 0) { act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], 
(double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0*fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) ); /* allocate data */ act_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) ); delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers+2; ++i ) { #ifdef ACT_NUMA_INTERLEAVED act_libxsmm[i] = (libxsmm_bfloat16*)numa_alloc_interleaved( MB*C[i]*sizeof(libxsmm_bfloat16)); #else act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); #endif /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float**) malloc( num_layers*sizeof(float*) ); fil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_master[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = 
(libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); /* init data */ for ( i = 0 ; i < num_layers+2; ++i ) { my_init_buf_bf16( act_libxsmm[i], MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers+1; ++i ) { my_init_buf_bf16( delact_libxsmm[i], MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { #if 0 { float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float)); my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 ); my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, fil_master[i], C[i], C[i+1], bc, bk); libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] ); free(cur_fil); } #else my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 ); libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] ); #endif } for ( i = 0 ; i < num_layers; ++i ) { #if 0 float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float)); float *cur_fil_vnni = (float*) malloc(C[i]*C[i+1]*sizeof(float)); my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 ); my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, cur_fil_vnni, C[i], C[i+1], bc, bk); libxsmm_rne_convert_fp32_bf16( cur_fil_vnni, delfil_libxsmm[i], C[i]*C[i+1] ); free(cur_fil); free(cur_fil_vnni); #else my_init_buf_bf16( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 ); #endif } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( 
delbias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] ); } zero_buf_int32( label_libxsmm, MB ); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); if ( fuse_type == 0 ) { my_fuse = MY_ELTWISE_FUSE_NONE; } else if ( fuse_type == 1 ) { my_fuse = MY_ELTWISE_FUSE_BIAS; } else if ( fuse_type == 2 ) { my_fuse = MY_ELTWISE_FUSE_RELU; } else if ( fuse_type == 4 ) { my_fuse = MY_ELTWISE_FUSE_BIAS_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } /* allocating handles */ my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) ); my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) ); my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) ); /* setting up handles + scratch */ for ( i = 0; i < num_layers; ++i ) { my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, lr ); /* let's allocate and bind scratch */ if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_scratch( scratch_size, 2097152 ); my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } } /* softmax+loss is treated as N+! 
layer */ my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads ); my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, loss_weight ); if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_scratch( scratch_size, 2097152 ); my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } my_numa_thr_cfg *numa_thr_cfg; setup_my_numa(&numa_thr_cfg, num_layers, nThreads); if ( type == 'F') { printf("##########################################\n"); printf("# Performance - FWD (custom-Storage) #\n"); printf("##########################################\n"); setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd); allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd); l_start = libxsmm_timer_tick(); copy_to_numa_buffers_fwd_inf(&numa_thr_cfg, num_layers, my_fc_fwd, fil_libxsmm); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif const int numa_node = numa_node_of_cpu(tid); for (j = 0; j < iters; ++j) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *filt = numa_thr_cfg[numa_node].scratch[i]; my_fc_fwd_exec( my_fc_fwd[i], filt, act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i); } #ifdef USE_SOFTMAX my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss, 0, tid, scratch ); #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = 0; i < num_layers; ++i) { gflop += 
(2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); /* Print some norms on last act for fwd and weights of first layer after all iterations */ last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float)); libxsmm_convert_bf16_f32( act_libxsmm[num_layers], last_act_fwd_f32, MB*C[num_layers]); libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, last_act_fwd_f32, last_act_fwd_f32, 0, 0); printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref); } if (type == 'B') { printf("##########################################\n"); printf("# Performance - BWD (custom-Storage) #\n"); printf("##########################################\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (j = 0; j < iters; ++j) { #ifdef USE_SOFTMAX my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); #endif for ( i = num_layers-1; i > 0; --i) { my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch ); my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch ); } my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch ); my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, 
scratch ); } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } if (type == 'A') { printf("#########################################################\n"); printf("# Unimplemented: Performance - FWD-BWD (custom-Storage) #\n"); printf("#########################################################\n"); exit(-1); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (j = 0; j < iters; ++j) { for ( i = 0; i < num_layers; ++i) { my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, NULL, 0); } #ifdef USE_SOFTMAX my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss, 0, tid, scratch ); my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); #endif for ( i = num_layers-1; i > 0; --i) { my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch ); my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch ); } my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], 
delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch ); my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch ); } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); #ifdef CHECK_L1 /* Print some norms on last act for fwd and weights of first layer after all iterations */ last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float)); first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float)); libxsmm_convert_bf16_f32( act_libxsmm[num_layers], last_act_fwd_f32, MB*C[num_layers]); #if 1 libxsmm_convert_bf16_f32( fil_libxsmm[0], first_wt_bwdupd_f32, C[0]*C[1]); libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, last_act_fwd_f32, last_act_fwd_f32, 0, 0); printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref); libxsmm_matdiff_reduce(&diff, &norms_fwd); libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, first_wt_bwdupd_f32, first_wt_bwdupd_f32, 0, 0); printf("L1 of wt[0] : %.25g\n", norms_bwd.l1_ref); libxsmm_matdiff_reduce(&diff, &norms_bwd); #else { int e = 0; FILE *fileAct, *fileWt; float *ref_last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float)); float *ref_first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float)); float *ref_first_wt_bwdupd_f32_kc = (float*) malloc(C[0]*C[1]*sizeof(float)); libxsmm_bfloat16 *first_wt_bwdupd_bf16 = (libxsmm_bfloat16*) malloc(C[0]*C[1]*sizeof(libxsmm_bfloat16)); fileAct = fopen("acts.txt","r"); if (fileAct != NULL) { int bufferLength = 255; char buffer[bufferLength]; e = 0; while(fgets(buffer, bufferLength, fileAct)) { ref_last_act_fwd_f32[e] = atof(buffer); e++; } fclose(fileAct); } /* compare */ libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, ref_last_act_fwd_f32, last_act_fwd_f32, 0, 0); printf("##########################################\n"); printf("# Correctness - Last fwd act #\n"); 
printf("##########################################\n"); printf("L1 reference : %.25g\n", norms_fwd.l1_ref); printf("L1 test : %.25g\n", norms_fwd.l1_tst); printf("L2 abs.error : %.24f\n", norms_fwd.l2_abs); printf("L2 rel.error : %.24f\n", norms_fwd.l2_rel); printf("Linf abs.error: %.24f\n", norms_fwd.linf_abs); printf("Linf rel.error: %.24f\n", norms_fwd.linf_rel); printf("Check-norm : %.24f\n", norms_fwd.normf_rel); libxsmm_matdiff_reduce(&diff, &norms_fwd); fileWt = fopen("weights.txt","r"); if (fileWt != NULL) { int bufferLength = 255; char buffer[bufferLength]; e = 0; while(fgets(buffer, bufferLength, fileWt)) { ref_first_wt_bwdupd_f32[e] = atof(buffer); e++; } fclose(fileWt); } matrix_copy_KCCK_to_KC( ref_first_wt_bwdupd_f32, ref_first_wt_bwdupd_f32_kc, C[0], C[1], bc, bk ); matrix_copy_KCCK_to_KC_bf16( fil_libxsmm[0], first_wt_bwdupd_bf16, C[0], C[1], bc, bk ); libxsmm_convert_bf16_f32( first_wt_bwdupd_bf16, first_wt_bwdupd_f32, C[0]*C[1] ); /* compare */ libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, ref_first_wt_bwdupd_f32_kc, first_wt_bwdupd_f32, 0, 0); printf("##########################################\n"); printf("# Correctness - First bwdupd wt #\n"); printf("##########################################\n"); printf("L1 reference : %.25g\n", norms_bwd.l1_ref); printf("L1 test : %.25g\n", norms_bwd.l1_tst); printf("L2 abs.error : %.24f\n", norms_bwd.l2_abs); printf("L2 rel.error : %.24f\n", norms_bwd.l2_rel); printf("Linf abs.error: %.24f\n", norms_bwd.linf_abs); printf("Linf rel.error: %.24f\n", norms_bwd.linf_rel); printf("Check-norm : %.24f\n", norms_bwd.normf_rel); libxsmm_matdiff_reduce(&diff, &norms_bwd); free(ref_last_act_fwd_f32); free(ref_first_wt_bwdupd_f32); free(ref_first_wt_bwdupd_f32_kc); free(first_wt_bwdupd_bf16); } #endif free(first_wt_bwdupd_f32); free(last_act_fwd_f32); #endif gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } 
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } /* deallocate data */ if ( scratch != NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { #ifdef ACT_NUMA_INTERLEAVED numa_free(act_libxsmm[i], MB*C[i]*sizeof(libxsmm_bfloat16)); #else libxsmm_free(act_libxsmm[i]); #endif libxsmm_free(delact_libxsmm[i]); } #ifdef ACT_NUMA_INTERLEAVED numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(libxsmm_bfloat16)); #else libxsmm_free(act_libxsmm[i+1]); #endif libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); libxsmm_free(fil_master[i]); } #ifdef ACT_NUMA_INTERLEAVED numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(libxsmm_bfloat16)); #else libxsmm_free(act_libxsmm[num_layers+1]); #endif libxsmm_free(label_libxsmm); for (i = 0; i < numa_num_configured_nodes(); i++) { free(numa_thr_cfg[i].blocksOFm_s); free(numa_thr_cfg[i].blocksOFm_e); for (j = 0; j < num_layers; j++) numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]); free(numa_thr_cfg[i].scratch); free(numa_thr_cfg[i].layer_size); } free(numa_thr_cfg); free( my_opt ); free( my_fc_fwd ); free( my_fc_bwd ); free( act_libxsmm ); free( delact_libxsmm ); free( fil_master ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( C ); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
veccopy-ompt-target-emi.c
/*
 * OMPT target EMI callback test: copies vector b into vector a twice —
 * once with "target parallel for" and once with
 * "target teams distribute parallel for" — then verifies the copy on the
 * host.  The trailing "/// CHECK:" lines are FileCheck directives pinning
 * the expected OMPT EMI callback sequence (begin/end pairs for target
 * regions, data alloc/transfer operations, and kernel submissions); they
 * are part of the test contract and must not be edited casually.
 */
#include <stdio.h>
#include <assert.h>
#include <omp.h>

#include "callbacks.h"

int main() {
  int N = 100000;

  int a[N];
  int b[N];

  int i;

  /* a starts zeroed; b carries the values to be copied. */
  for (i=0; i<N; i++)
    a[i]=0;

  for (i=0; i<N; i++)
    b[i]=i;

  /* First offload region: single team, parallel worksharing loop.
   * Expected to trigger the first Target/DataOp/Submit EMI callback
   * group below (req_num_teams=1). */
#pragma omp target parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }

  /* Second offload region: league of teams, distributed worksharing
   * loop.  Expected to trigger the second callback group below
   * (req_num_teams=0, i.e. runtime-chosen). */
#pragma omp target teams distribute parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }

  /* Host-side verification: the copy must have been written back. */
  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }

  if (!rc)
    printf("Success\n");

  return rc;
}

/// CHECK: Callback Init:
/// CHECK: Callback Load:
/// CHECK: Callback Target EMI: kind=1 endpoint=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=1
/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback Target EMI: kind=1 endpoint=2
/// CHECK: Callback Target EMI: kind=1 endpoint=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=0
/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=0
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback Target EMI: kind=1 endpoint=2
/// CHECK: Callback Fini:
fdtd2d.c
/** * fdtd2d.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "FDTD-2D" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 /* Problem size. */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 2048 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define tmax 500 #define NX SIZE #define NY SIZE /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE)i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i * NY + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX; ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX; hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX; } } } void init_array_hz(DATA_TYPE *hz) { int i, j; for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX; } } } int compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) { int i, j, fail; fail = 0; for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { if (percentDiff(hz1[i * NY + j], hz2[i * NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { 
int t, i, j; for (t = 0; t < tmax; t++) { for (j = 0; j < NY; j++) { ey[0 * NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i * NY + j] = ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] - 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = hz[i * NY + j] - 0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } } } void runFdtd_OMP(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { int t, i, j; #pragma omp target data map(to : _fict_[ : tmax], ex[ : (NX *(NY + 1))], ey[ : ((NX + 1) * NY)]) map(tofrom : hz[ : (NX *(NY + 1))]) device(DEVICE_ID) { for (t = 0; t < tmax; t++) { #pragma omp target teams distribute parallel for device(DEVICE_ID) for (j = 0; j < NY; j++) { ey[0 * NY + j] = _fict_[t]; } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i * NY + j] = ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]); } } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] - 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]); } } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = hz[i * NY + j] - 0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } } } } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *_fict_; DATA_TYPE *ex; DATA_TYPE *ey; DATA_TYPE *hz; DATA_TYPE *hz_outputFromGpu; _fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE)); ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE)); ey = (DATA_TYPE *)malloc((NX 
+ 1) * NY * sizeof(DATA_TYPE)); hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); //fprintf(stdout, "<< 2-D Finite Different Time Domain Kernel size: %d>>\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_arrays(_fict_, ex, ey, hz); init_array_hz(hz_outputFromGpu); t_start = rtclock(); runFdtd_OMP(_fict_, ex, ey, hz_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); runFdtd(_fict_, ex, ey, hz); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(hz, hz_outputFromGpu); #endif free(_fict_); free(ex); free(ey); free(hz); free(hz_outputFromGpu); return fail; }
o5logon_fmt_plug.c
/* Cracker for Oracle's O5LOGON protocol hashes. Hacked together during
 * September of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * O5LOGON is used since version 11g. CVE-2012-3137 applies to Oracle 11.1
 * and 11.2 databases. Oracle has "fixed" the problem in version 11.2.0.3.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

/*
 * Modifications (c) 2014 Harrison Neal, released under the same terms
 * as the original.
 */

/* Standard John the Ripper plugin stanza: the format is either declared,
 * registered, or fully compiled depending on how this file is included. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o5logon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o5logon);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes/aes.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 512 // tuned on core i7
#endif
#include "memdbg.h"

#define FORMAT_LABEL "o5logon"
#define FORMAT_NAME "Oracle O5LOGON protocol"
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
/* AUTH_SESSKEY is 48 bytes (96 hex digits); AUTH_VFR_DATA is 10 bytes. */
#define CIPHERTEXT_LENGTH 48
#define SALT_LENGTH 10
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Self-test vectors: "$o5logon$" + hex AUTH_SESSKEY + "*" + hex AUTH_VFR_DATA,
 * paired with the known plaintext password. */
static struct fmt_tests o5logon_tests[] = {
	{"$o5logon$566499330E8896301A1D2711EFB59E756D41AF7A550488D82FE7C8A418E5BE08B4052C0DC404A805C1D7D43FE3350873*4F739806EBC1D7742BC6", "password"},
	{"$o5logon$3BB71A77E1DBB5FFCCC8FC8C4537F16584CB5113E4CCE3BAFF7B66D527E32D29DF5A69FA747C4E2C18C1837F750E5BA6*4F739806EBC1D7742BC6", "password"},
	{"$o5logon$ED91B97A04000F326F17430A65DACB30CD1EF788E6EC310742B811E32112C0C9CC39554C9C01A090CB95E95C94140C28*7FD52BC80AA5836695D4", "test1"},
	{"$o5logon$B7711CC7E805520CEAE8C1AC459F745639E6C9338F192F92204A9518B226ED39851C154CB384E4A58C444A6DF26146E4*3D14D54520BC9E6511F4", "openwall"},
	{"$o5logon$76F9BBAEEA9CF70F2A660A909F85F374F16F0A4B1BE1126A062AE9F0D3268821EF361BF08EBEF392F782F2D6D0192FD6*3D14D54520BC9E6511F4", "openwall"},
	{"$o5logon$C35A36EA7FF7293EF828B2BD5A2830CA28A57BF621EAE14B605D41A88FC2CF7EFE7C73495FB22F06D6D98317D63DDA71*406813CBAEED2FD4AD23", "MDDATA"},
	{"$o5logon$B9AC30E3CD7E1D7C95FA17E1C62D061289C36FD5A6C45C098FF7572AB9AD2B684FB7E131E03CE1543A5A99A30D68DD13*447BED5BE70F7067D646", "sys"},
	// the following hash (from HITCON 2014 CTF) revealed multiple bugs in this format (false positives)!
	// m3odbe
	// m3o3rt
	{"$o5logon$A10D52C1A432B61834F4B0D9592F55BD0DA2B440AEEE1858515A646683240D24A61F0C9366C63E93D629292B7891F44A*878C0B92D61A594F2680", "m3ow00"},
	{NULL}
};

/* Candidate passwords for the current crypt_all batch, NUL-terminated. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] is set when key i matched; any_cracked is the OR of all flags. */
static int *cracked, any_cracked;

static struct custom_salt {
	char unsigned salt[SALT_LENGTH]; /* AUTH_VFR_DATA */
	char unsigned ct[CIPHERTEXT_LENGTH]; /* AUTH_SESSKEY */
} *cur_salt;

/* CBC-mode AES decryption function pointer, resolved at init(). */
static aes_fptr_cbc aesFunc;

/* One-time format setup: scale the key batch size for OpenMP, allocate
 * per-key buffers, pick the AES-192-CBC decrypt implementation, and
 * append its type string to the reported algorithm name. */
static void init(struct fmt_main *self)
{
	char *Buf;
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	cracked = mem_calloc_tiny(sizeof(*cracked) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	aesFunc = get_AES_dec192_CBC();
	Buf = mem_alloc_tiny(128, 1);
	sprintf(Buf, "%s %s", self->params.algorithm_name,
	        get_AES_type_string());
	self->params.algorithm_name=Buf;
}

/* Return nonzero iff q consists only of hex digits (per the atoi16 table)
 * up to its NUL terminator.  Callers check the length separately. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate a candidate ciphertext string: "$o5logon$" tag, then a
 * 96-hex-digit session key and a 20-hex-digit salt separated by '*'.
 * Works on a strdup'd copy because strtok modifies its input. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	if (strncmp(ciphertext, "$o5logon$", 9))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 9;
	p = strtok(ctcopy, "*"); /* ciphertext */
	if(!p)
		goto err;
	if(strlen(p) != CIPHERTEXT_LENGTH * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt */
		goto err;
	if(strlen(p) != SALT_LENGTH * 2)
		goto err;
	if (!ishex(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the hex fields of an already-validated ciphertext into a
 * custom_salt (session key bytes + salt bytes).  Returns a pointer to
 * static storage, per the usual John format convention. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	ctcopy += 9;	/* skip over "$o5logon$" */
	p = strtok(ctcopy, "*");
	for (i = 0; i < CIPHERTEXT_LENGTH; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < SALT_LENGTH; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Try every queued candidate against the current salt.
 *
 * Per candidate: key = SHA1(password || salt) zero-extended from 20 to
 * 24 bytes, then AES-192-CBC-decrypt the last 16-byte block of the
 * 48-byte session key (ct+32) using the previous block (ct+16) as IV.
 * A correct password yields eight 0x08 padding bytes in the tail.
 *
 * Note the #ifdef idiom: without OpenMP the for statement disappears
 * and the block runs once — correct because MIN/MAX_KEYS_PER_CRYPT is
 * 1 and is only scaled up under _OPENMP, so count is then always 1. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, sizeof(*cracked) * count);
		any_cracked = 0;
	}

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char key[24];
		unsigned char pt[16];
		unsigned char iv[16];
		// No longer using AES key here.
		SHA_CTX ctx;

		/* SHA-1 produces 20 bytes; zero the remaining 4 so the
		 * full 24-byte AES-192 key is deterministic. */
		memset(&key[20], 0, 4);

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Update(&ctx, cur_salt->salt, 10);
		SHA1_Final(key, &ctx);

		/* CBC: IV for the last block is the preceding ciphertext block. */
		memcpy(iv, cur_salt->ct + 16, 16);

		// Using AES function:
		// in (cipher), out (plain), key, block count, iv
		aesFunc(cur_salt->ct + 32, pt, key, 1, iv);

		/* Check for the expected 8 bytes of PKCS#7-style 0x08 padding. */
		if (!memcmp(pt + 8, "\x08\x08\x08\x08\x08\x08\x08\x08", 8)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

/* With BINARY_SIZE 0 there is no stored binary; "cmp" just reports the
 * flags computed in crypt_all. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password (truncated to PLAINTEXT_LENGTH). */
static void o5logon_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the callbacks above into John's framework. */
struct fmt_main fmt_o5logon = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		o5logon_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		o5logon_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */