source
stringlengths
3
92
c
stringlengths
26
2.25M
neuralNetwork.h
#ifndef NNETWORK #define NNETWORK //standard libraries #include <math.h> #include <ctime> #include <vector> #include <fstream> #include <sstream> //custom includes #include "dataEntry.h" using namespace std; //Constant Defaults! #define LEARNING_RATE 0.001 #define MOMENTUM 0.9 #define MAX_EPOCHS 1500 #define DESIRED_ACCURACY 90 /******************************************************************************************************************* * NEURAL NETWORK CLASS * ---------------------------------------------------------------------------------------------------------------- * Classic Back-propagation Neural Network ( makes use of gradient descent ) * Can toggle between stochastic and batch learning * ---------------------------------------------------------------------------------------------------------------- * Author: Bobby Anguelov (banguelov@cs.up.ac.za) * Downloaded From: takinginitiative.wordpress.co.za *******************************************************************************************************************/ class neuralNetwork { //private members //---------------------------------------------------------------------------------------------------------------- private: //learning parameters double learningRate; // adjusts the step size of the weight update double momentum; // improves performance of stochastic learning (don't use for batch) //number of neurons int nInput, nHidden, nOutput; //neurons double* inputNeurons; double* hiddenNeurons; double* outputNeurons; //weights double** wInputHidden; double** wHiddenOutput; //epoch counter long epoch; long maxEpochs; //accuracy required double desiredAccuracy; //change to weights double** deltaInputHidden; double** deltaHiddenOutput; //error gradients double* hiddenErrorGradients; double* outputErrorGradients; //accuracy stats per epoch double trainingSetAccuracy; double validationSetAccuracy; double generalizationSetAccuracy; double trainingSetMSE; double validationSetMSE; double 
generalizationSetMSE; //batch learning flag bool useBatch; //log file handle bool logResults; fstream logFile; int logResolution; int lastEpochLogged; //public methods //---------------------------------------------------------------------------------------------------------------- public: //constructor neuralNetwork(int in, int hidden, int out) : nInput(in), nHidden(hidden), nOutput(out), epoch(0), logResults(false), logResolution(10), lastEpochLogged(-10), trainingSetAccuracy(0), validationSetAccuracy(0), generalizationSetAccuracy(0), trainingSetMSE(0), validationSetMSE(0), generalizationSetMSE(0) { //create neuron lists //-------------------------------------------------------------------------------------------------------- inputNeurons = new( double[in + 1] ); for ( int i=0; i < in; i++ ) inputNeurons[i] = 0; //create bias neuron inputNeurons[in] = -1; hiddenNeurons = new( double[hidden + 1] ); for ( int i=0; i < hidden; i++ ) hiddenNeurons[i] = 0; //create bias neuron hiddenNeurons[hidden] = -1; outputNeurons = new( double[out] ); for ( int i=0; i < out; i++ ) outputNeurons[i] = 0; //create weight lists (include bias neuron weights) //-------------------------------------------------------------------------------------------------------- wInputHidden = new( double*[in + 1] ); for ( int i=0; i <= in; i++ ) { wInputHidden[i] = new (double[hidden]); for ( int j=0; j < hidden; j++ ) wInputHidden[i][j] = 0; } wHiddenOutput = new( double*[hidden + 1] ); for ( int i=0; i <= hidden; i++ ) { wHiddenOutput[i] = new (double[out]); for ( int j=0; j < out; j++ ) wHiddenOutput[i][j] = 0; } //create delta lists //-------------------------------------------------------------------------------------------------------- deltaInputHidden = new( double*[in + 1] ); for ( int i=0; i <= in; i++ ) { deltaInputHidden[i] = new (double[hidden]); for ( int j=0; j < hidden; j++ ) deltaInputHidden[i][j] = 0; } deltaHiddenOutput = new( double*[hidden + 1] ); for ( int i=0; i <= hidden; i++ 
) { deltaHiddenOutput[i] = new (double[out]); for ( int j=0; j < out; j++ ) deltaHiddenOutput[i][j] = 0; } //create error gradient storage //-------------------------------------------------------------------------------------------------------- hiddenErrorGradients = new( double[hidden + 1] ); for ( int i=0; i <= hidden; i++ ) hiddenErrorGradients[i] = 0; outputErrorGradients = new( double[out + 1] ); for ( int i=0; i <= out; i++ ) outputErrorGradients[i] = 0; //initialize weights //-------------------------------------------------------------------------------------------------------- initializeWeights(); //default learning parameters //-------------------------------------------------------------------------------------------------------- learningRate = LEARNING_RATE; momentum = MOMENTUM; //use stochastic learning by default useBatch = false; //stop conditions //-------------------------------------------------------------------------------------------------------- maxEpochs = MAX_EPOCHS; desiredAccuracy = DESIRED_ACCURACY; } //destructor ~neuralNetwork() { //delete neurons delete[] inputNeurons; delete[] hiddenNeurons; delete[] outputNeurons; //delete weight storage for (int i=0; i <= nInput; i++) delete[] wInputHidden[i]; delete[] wInputHidden; for (int j=0; j <= nHidden; j++) delete[] wHiddenOutput[j]; delete[] wHiddenOutput; //delete delta storage for (int i=0; i <= nInput; i++) delete[] deltaInputHidden[i]; delete[] deltaInputHidden; for (int j=0; j <= nHidden; j++) delete[] deltaHiddenOutput[j]; delete[] deltaHiddenOutput; //delete error gradients delete[] hiddenErrorGradients; delete[] outputErrorGradients; //close log file if ( logFile.is_open() ) logFile.close(); } //set learning parameters void setLearningParameters(double lr, double m) { learningRate = lr; momentum = m; } //set max epoch void setMaxEpochs(int max) { maxEpochs = max; } //set desired accuracy void setDesiredAccuracy(float d) { desiredAccuracy = d; } //enable batch learning void 
useBatchLearning() { useBatch = true; } //enable stochastic learning void useStochasticLearning() { useBatch = false; } //enable logging of training results void enableLogging(const char* filename, int resolution = 1) { //create log file if ( ! logFile.is_open() ) { logFile.open(filename, ios::out); if ( logFile.is_open() ) { //write log file header logFile << "Epoch,Training Set Accuracy, Generalization Set Accuracy,Training Set MSE, Generalization Set MSE" << endl; //enable logging logResults = true; //resolution setting; logResolution = resolution; lastEpochLogged = -resolution; } } } //resets the neural network void resetWeights() { //reinitialize weights initializeWeights(); } //feed data through network double* feedInput( double* inputs) { //feed data into the network feedForward(inputs); //return results return outputNeurons; } //train the network void trainNetwork( vector<dataEntry*> trainingSet, vector<dataEntry*> generalizationSet, vector<dataEntry*> validationSet ) { cout<< endl << " Neural Network Training Starting: " << endl << "==========================================================================" << endl << " LR: " << learningRate << ", Momentum: " << momentum << ", Max Epochs: " << maxEpochs << endl << " " << nInput << " Input Neurons, " << nHidden << " Hidden Neurons, " << nOutput << " Output Neurons" << endl << "==========================================================================" << endl << endl; //reset epoch and log counters epoch = 0; lastEpochLogged = -logResolution; //train network using training dataset for training and generalization dataset for testing //-------------------------------------------------------------------------------------------------------- while ( ( trainingSetAccuracy < desiredAccuracy || generalizationSetAccuracy < desiredAccuracy ) && epoch < maxEpochs ) { //store previous accuracy double previousTAccuracy = trainingSetAccuracy; double previousGAccuracy = generalizationSetAccuracy; //use training set to 
train network runTrainingEpoch( trainingSet ); //get generalization set accuracy and MSE generalizationSetAccuracy = getSetAccuracy( generalizationSet ); generalizationSetMSE = getSetMSE( generalizationSet ); //Log Training results if (logResults && logFile.is_open() && ( epoch - lastEpochLogged == logResolution ) ) { logFile << epoch << "," << trainingSetAccuracy << "," << generalizationSetAccuracy << "," << trainingSetMSE << "," << generalizationSetMSE << endl; lastEpochLogged = epoch; } //print out change in training /generalization accuracy (only if a change is greater than a percent) if ( ceil(previousTAccuracy) != ceil(trainingSetAccuracy) || ceil(previousGAccuracy) != ceil(generalizationSetAccuracy) ) { cout << "Epoch :" << epoch; cout << " TSet Acc:" << trainingSetAccuracy << "%, MSE: " << trainingSetMSE ; cout << " GSet Acc:" << generalizationSetAccuracy << "%, MSE: " << generalizationSetMSE << endl; } //once training set is complete increment epoch epoch++; }//end while //get validation set accuracy and MSE validationSetAccuracy = getSetAccuracy(validationSet); validationSetMSE = getSetMSE(validationSet); //log end logFile << epoch << "," << trainingSetAccuracy << "," << generalizationSetAccuracy << "," << trainingSetMSE << "," << generalizationSetMSE << endl << endl; logFile << "Training Complete!!! - > Elapsed Epochs: " << epoch << " Validation Set Accuracy: " << validationSetAccuracy << " Validation Set MSE: " << validationSetMSE << endl; //out validation accuracy and MSE cout << endl << "Training Complete!!! 
- > Elapsed Epochs: " << epoch << endl; cout << " Validation Set Accuracy: " << validationSetAccuracy << endl; cout << " Validation Set MSE: " << validationSetMSE << endl << endl; } //private methods //---------------------------------------------------------------------------------------------------------------- private: //initialize weights and weight changes void initializeWeights() { //init random number generator srand( (unsigned int) time(0) ); //set weights between input and hidden to a random value between -05 and 0.5 //-------------------------------------------------------------------------------------------------------- for(int i = 0; i <= nInput; i++) { for(int j = 0; j < nHidden; j++) { //set weights to random values wInputHidden[i][j] = (double)rand() / (RAND_MAX + 1) - 0.5; //create blank delta deltaInputHidden[i][j] = 0; } } //set weights between input and hidden to a random value between -05 and 0.5 //-------------------------------------------------------------------------------------------------------- for(int i = 0; i <= nHidden; i++) { for(int j = 0; j < nOutput; j++) { //set weights to random values wHiddenOutput[i][j] = (double)rand() / (RAND_MAX + 1) - 0.5; //create blank delta deltaHiddenOutput[i][j] = 0; } } } //run a single training epoch void runTrainingEpoch( vector<dataEntry*> trainingSet ) { //incorrect patterns double incorrectPatterns = 0; double mse = 0; //for every training pattern for ( int tp = 0; tp < (int) trainingSet.size(); tp++) { //feed inputs through network and backpropagate errors feedForward( trainingSet[tp]->pattern ); backpropagate( trainingSet[tp]->target ); //pattern correct flag bool patternCorrect = true; //check all outputs from neural network against desired values for ( int k = 0; k < nOutput; k++ ) { //pattern incorrect if desired and output differ if ( getRoundedOutputValue( outputNeurons[k] ) != trainingSet[tp]->target[k] ) patternCorrect = false; //calculate MSE mse += pow((outputNeurons[k] - 
trainingSet[tp]->target[k]), 2); } //if pattern is incorrect add to incorrect count if ( !patternCorrect ) incorrectPatterns++; }//end for //if using batch learning - update the weights if ( useBatch ) updateWeights(); //update training accuracy and MSE trainingSetAccuracy = 100 - (incorrectPatterns/trainingSet.size() * 100); trainingSetMSE = mse / ( nOutput * trainingSet.size() ); } //feed input forward void feedForward( double *inputs ) { //set input neurons to input values for(int i = 0; i < nInput; i++) inputNeurons[i] = inputs[i]; //Calculate Hidden Layer values - include bias neuron //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for(int j=0; j < nHidden; j++) { //clear value hiddenNeurons[j] = 0; //get weighted sum of inputs and bias neuron for( int i=0; i <= nInput; i++ ) hiddenNeurons[j] += inputNeurons[i] * wInputHidden[i][j]; //set to result of sigmoid hiddenNeurons[j] = activationFunction( hiddenNeurons[j] ); } //Calculating Output Layer values - include bias neuron //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for(int k=0; k < nOutput; k++) { //clear value outputNeurons[k] = 0; //get weighted sum of inputs and bias neuron for( int j=0; j <= nHidden; j++ ) outputNeurons[k] += hiddenNeurons[j] * wHiddenOutput[j][k]; //set to result of sigmoid outputNeurons[k] = activationFunction( outputNeurons[k] ); } } //modify weights according to ouput void backpropagate( double* desiredValues ) { //modify deltas between hidden and output layers //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for (int k = 0; k < nOutput; k++) { //get error gradient for every output node outputErrorGradients[k] = getOutputErrorGradient( desiredValues[k], outputNeurons[k] ); //for all nodes in hidden layer and bias neuron for (int j = 
0; j <= nHidden; j++) { //calculate change in weight if ( !useBatch ) deltaHiddenOutput[j][k] = learningRate * hiddenNeurons[j] * outputErrorGradients[k] + momentum * deltaHiddenOutput[j][k]; else deltaHiddenOutput[j][k] += learningRate * hiddenNeurons[j] * outputErrorGradients[k]; } } //modify deltas between input and hidden layers //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for (int j = 0; j < nHidden; j++) { //get error gradient for every hidden node hiddenErrorGradients[j] = getHiddenErrorGradient( j ); //for all nodes in input layer and bias neuron for (int i = 0; i <= nInput; i++) { //calculate change in weight if ( !useBatch ) deltaInputHidden[i][j] = learningRate * inputNeurons[i] * hiddenErrorGradients[j] + momentum * deltaInputHidden[i][j]; else deltaInputHidden[i][j] += learningRate * inputNeurons[i] * hiddenErrorGradients[j]; } } //if using stochastic learning update the weights immediately if ( !useBatch ) updateWeights(); } //update weights void updateWeights() { //input -> hidden weights //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for (int i = 0; i <= nInput; i++) { for (int j = 0; j < nHidden; j++) { //update weight wInputHidden[i][j] += deltaInputHidden[i][j]; //clear delta only if using batch (previous delta is needed for momentum if (useBatch) deltaInputHidden[i][j] = 0; } } //hidden -> output weights //-------------------------------------------------------------------------------------------------------- #pragma omp parallel for for (int j = 0; j <= nHidden; j++) { for (int k = 0; k < nOutput; k++) { //update weight wHiddenOutput[j][k] += deltaHiddenOutput[j][k]; //clear delta only if using batch (previous delta is needed for momentum) if (useBatch)deltaHiddenOutput[j][k] = 0; } } } //activation function inline double activationFunction( double x ) { //sigmoid function 
return 1/(1+exp(-x)); } //get error gradient for ouput layer inline double getOutputErrorGradient(double desiredValue, double outputValue) { //return error gradient return outputValue * ( 1 - outputValue ) * ( desiredValue - outputValue ); } //get error gradient for hidden layer double getHiddenErrorGradient( int j ) { //get sum of hidden->output weights * output error gradients double weightedSum = 0; for( int k = 0; k < nOutput; k++ ) weightedSum += wHiddenOutput[j][k] * outputErrorGradients[k]; //return error gradient return hiddenNeurons[j] * ( 1 - hiddenNeurons[j] ) * weightedSum; } //round up value to get a boolean result int getRoundedOutputValue( double x ) { if ( x < 0.1 ) return 0; else if ( x > 0.9 ) return 1; else return -1; } //feed forward set of patterns and return error double getSetAccuracy( vector<dataEntry*> set ) { double incorrectResults = 0; //for every training input array for ( int tp = 0; tp < (int) set.size(); tp++) { //feed inputs through network and backpropagate errors feedForward( set[tp]->pattern ); //correct pattern flag bool correctResult = true; //check all outputs against desired output values for ( int k = 0; k < nOutput; k++ ) { //set flag to false if desired and output differ if ( getRoundedOutputValue(outputNeurons[k]) != set[tp]->target[k] ) correctResult = false; } //inc training error for a incorrect result if ( !correctResult ) incorrectResults++; }//end for //calculate error and return as percentage return 100 - (incorrectResults/set.size() * 100); } //feed forward set of patterns and return MSE double getSetMSE ( vector<dataEntry*> set ) { double mse = 0; //for every training input array for ( int tp = 0; tp < (int) set.size(); tp++) { //feed inputs through network and backpropagate errors feedForward( set[tp]->pattern ); //check all outputs against desired output values for ( int k = 0; k < nOutput; k++ ) { //sum all the MSEs together mse += pow((outputNeurons[k] - set[tp]->target[k]), 2); } }//end for //calculate error 
and return as percentage return mse/(nOutput * set.size()); } }; #endif
mic_spat_to_SH.gen.c
# ================================================================================================
# Added documentation only - the generator input below is reproduced unchanged.
# This is NOT plain C: "make" keeps or strips each line according to its leading tag before
# compiling (tags are described in the original header below).  From the code: Q/QX lines belong
# to the scalar (spat_to_SH) variant, V/VX lines to the vector (spat_to_SHsphtor) variant,
# '3'-tagged lines to the combined spat_to_SHqst variant, and '#' lines are meta-comments that
# are always removed; untagged lines are shared by every generated function.
# Structure visible in this chunk:
#   * _an1/_an2/_an3 (GEN3-expanded) are per-thread analysis cores: they fold spatial data with
#     recurrence-generated associated Legendre terms, weighting by wg (the code notes this is the
#     Gauss quadrature weight), treating im=0 as purely real and m>0 modes via the
#     symmetric/antisymmetric north-south folding done in the do/while blocks.
#   * spat_to_SH_mic / spat_to_SHsphtor_mic / spat_to_SHqst_mic are the entry points: they run
#     the fftc FFT plan (in-place, or out-of-place into VMALLOC'd buffers when fftc_mode > 0),
#     launch the core inside an "#pragma omp parallel" region, then free the temporary buffers.
# NOTE(review): rnd, v2d, vall/vread/vdup, reduce_add/v2d_reduce, NLAT_2, VSIZE2, SSE and GEN3
# are presumably macros from elsewhere in SHT.c / sht_private.h - confirm before editing.
# NOTE(review): the original newline structure of this chunk appears to have been collapsed by
# extraction; since the Q/V tags are line-prefix-significant, do not re-wrap these lines.
# ================================================================================================
/* * Copyright (c) 2010-2015 Centre National de la Recherche Scientifique. * written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France). * * nathanael.schaeffer@ujf-grenoble.fr * * This software is governed by the CeCILL license under French law and * abiding by the rules of distribution of free software. You can use, * modify and/or redistribute the software under the terms of the CeCILL * license as circulated by CEA, CNRS and INRIA at the following URL * "http://www.cecill.info". * * The fact that you are presently reading this means that you have had * knowledge of the CeCILL license and that you accept its terms. * */ # This file is meta-code for SHT.c (spherical harmonic transform). # it is intended for "make" to generate C code for 3 similar SHT functions, # (namely spat_to_SH [Q tag]), spat_to_SHsphtor [V tag], spat_to_SH3 [both Q&V tags]) # from one generic function + tags. # Basically, there are tags at the beginning of lines (Q,V) that are information # to keep or remove the line depending on the function to build. 
(Q for scalar, V for vector, # for comment) # ////////////////////////////////////////////////// static QX void GEN3(_an1,NWAY,SUFFIX)(shtns_cfg shtns, double *BrF, cplx *Qlm, const long int llim, const int imlim) VX void GEN3(_an2,NWAY,SUFFIX)(shtns_cfg shtns, double *BtF, double *BpF, cplx *Slm, cplx *Tlm, const long int llim, const int imlim) 3 void GEN3(_an3,NWAY,SUFFIX)(shtns_cfg shtns, double *BrF, double *BtF, double *BpF, cplx *Qlm, cplx *Slm, cplx *Tlm, const long int llim, const int imlim) { #define NW (NWAY*2) double *alm, *al; double *wg, *ct, *st; V double *l_2; long int nk, k, l,m; unsigned m0, mstep; int k_inc, m_inc; #ifndef SHT_AXISYM unsigned im; V double m_1; #endif Q v2d qq[llim]; V v2d ss[llim]; V v2d tt[llim]; Q double rer[NLAT_2 + NW*VSIZE2] SSE; Q double ror[NLAT_2 + NW*VSIZE2] SSE; V double ter[NLAT_2 + NW*VSIZE2] SSE; V double tor[NLAT_2 + NW*VSIZE2] SSE; V double per[NLAT_2 + NW*VSIZE2] SSE; V double por[NLAT_2 + NW*VSIZE2] SSE; #ifndef SHT_AXISYM Q double rei[NLAT_2 + NW*VSIZE2] SSE; Q double roi[NLAT_2 + NW*VSIZE2] SSE; V double tei[NLAT_2 + NW*VSIZE2] SSE; V double toi[NLAT_2 + NW*VSIZE2] SSE; V double pei[NLAT_2 + NW*VSIZE2] SSE; V double poi[NLAT_2 + NW*VSIZE2] SSE; #endif nk = NLAT_2; // copy NLAT_2 to a local variable for faster access (inner loop limit) #if _GCC_VEC_ nk = ((unsigned) nk+(VSIZE2-1))/VSIZE2; #endif wg = shtns->wg; ct = shtns->ct; st = shtns->st; V l_2 = shtns->l_2; for (k=nk*VSIZE2; k<(nk-1+NW)*VSIZE2; ++k) { // never written, so this is now done for all m's Q rer[k] = 0.0; ror[k] = 0.0; V ter[k] = 0.0; tor[k] = 0.0; V per[k] = 0.0; por[k] = 0.0; #ifndef SHT_AXISYM Q rei[k] = 0.0; roi[k] = 0.0; V tei[k] = 0.0; toi[k] = 0.0; V pei[k] = 0.0; poi[k] = 0.0; #endif } // ACCESS PATTERN k_inc = shtns->k_stride_a; m_inc = shtns->m_stride_a; #ifndef _OPENMP m0 = 0; mstep = 1; #else m0 = omp_get_thread_num(); mstep = omp_get_num_threads(); if (m0 == 0) #endif { // im=0 : dzl.p = 0.0 and evrything is REAL alm = shtns->blm; Q 
double r0 = 0.0; Q k=0; do { // compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0) Q double an = BrF[k*k_inc]; double bn = BrF[k*k_inc +1]; Q double bs = BrF[(NLAT-2-k)*k_inc]; double as = BrF[(NLAT-2-k)*k_inc +1]; Q rer[k] = an+as; ror[k] = an-as; Q rer[k+1] = bn+bs; ror[k+1] = bn-bs; Q r0 += (an+as)*wg[k] + (bn+bs)*wg[k+1]; Q k+=2; Q } while(k < nk*VSIZE2); V k=0; do { // compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0) V double an = BtF[k*k_inc]; double bn = BtF[k*k_inc +1]; V double bs = BtF[(NLAT-2-k)*k_inc]; double as = BtF[(NLAT-2-k)*k_inc +1]; V ter[k] = an+as; tor[k] = an-as; V ter[k+1] = bn+bs; tor[k+1] = bn-bs; V k+=2; V } while(k < nk*VSIZE2); V k=0; do { // compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0) V double an = BpF[k*k_inc]; double bn = BpF[k*k_inc +1]; V double bs = BpF[(NLAT-2-k)*k_inc]; double as = BpF[(NLAT-2-k)*k_inc +1]; V per[k] = an+as; por[k] = an-as; V per[k+1] = bn+bs; por[k+1] = bn-bs; V k+=2; V } while(k < nk*VSIZE2); Q Qlm[0] = r0 * alm[0]; // l=0 is done. V Slm[0] = 0.0; Tlm[0] = 0.0; // l=0 is zero for the vector transform. k = 0; Q double* q_ = (double*) qq; V double* s_ = (double*) ss; double* t_ = (double*) tt; for (l=0;l<llim;++l) { Q q_[l] = 0.0; V s_[l] = 0.0; t_[l] = 0.0; } do { al = alm; rnd cost[NW], y0[NW], y1[NW]; V rnd sint[NW], dy0[NW], dy1[NW]; Q rnd rerk[NW], rork[NW]; // help the compiler to cache into registers. V rnd terk[NW], tork[NW], perk[NW], pork[NW]; for (int j=0; j<NW; ++j) { cost[j] = vread(ct, k+j); y0[j] = vall(al[0]) * vread(wg, k+j); // weight of Gauss quadrature appears here V dy0[j] = vall(0.0); V sint[j] = -vread(st, k+j); y1[j] = (vall(al[1])*y0[j]) * cost[j]; V dy1[j] = (vall(al[1])*y0[j]) * sint[j]; Q rerk[j] = vread(rer, k+j); rork[j] = vread(ror, k+j); // cache into registers. 
V terk[j] = vread(ter, k+j); tork[j] = vread(tor, k+j); V perk[j] = vread(per, k+j); pork[j] = vread(por, k+j); } al+=2; l=1; while(l<llim) { for (int j=0; j<NW; ++j) { V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j]; y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j]; } Q rnd q = y1[0] * rork[0]; V rnd s = dy1[0] * terk[0]; V rnd t = dy1[0] * perk[0]; for (int j=1; j<NW; ++j) { Q q += y1[j] * rork[j]; V s += dy1[j] * terk[j]; V t += dy1[j] * perk[j]; } Q q_[l-1] += reduce_add(q); V s_[l-1] += reduce_add(s); V t_[l-1] -= reduce_add(t); for (int j=0; j<NW; ++j) { V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*sint[j]) + vall(al[2])*dy1[j]; y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j]; } Q q = y0[0] * rerk[0]; V s = dy0[0] * tork[0]; V t = dy0[0] * pork[0]; for (int j=1; j<NW; ++j) { Q q += y0[j] * rerk[j]; V s += dy0[j] * tork[j]; V t += dy0[j] * pork[j]; } Q q_[l] += reduce_add(q); V s_[l] += reduce_add(s); V t_[l] -= reduce_add(t); al+=4; l+=2; } if (l==llim) { Q rnd q = y1[0] * rork[0]; V rnd s = dy1[0] * terk[0]; V rnd t = dy1[0] * perk[0]; for (int j=1; j<NW; ++j) { Q q += y1[j] * rork[j]; V s += dy1[j] * terk[j]; V t += dy1[j] * perk[j]; } Q q_[l-1] += reduce_add(q); V s_[l-1] += reduce_add(s); V t_[l-1] -= reduce_add(t); } k+=NW; } while (k < nk); for (l=1; l<=llim; ++l) { Q Qlm[l] = q_[l-1]; V Slm[l] = s_[l-1]*l_2[l]; Tlm[l] = t_[l-1]*l_2[l]; } #ifdef SHT_VAR_LTR for (l=llim+1; l<= LMAX; ++l) { Q ((v2d*)Qlm)[l] = vdup(0.0); V ((v2d*)Slm)[l] = vdup(0.0); ((v2d*)Tlm)[l] = vdup(0.0); } #ifndef SHT_AXISYM if (imlim <= MMAX) { // zero out m >= imlim l = LiM(shtns, imlim*MRES, imlim); do { Q ((v2d*)Qlm)[l] = vdup(0.0); V ((v2d*)Slm)[l] = vdup(0.0); ((v2d*)Tlm)[l] = vdup(0.0); } while(++l < shtns->nlm); } #endif #endif m0=mstep; } #ifndef SHT_AXISYM for (im=m0; im<imlim; im+=mstep) { m = im*MRES; l = shtns->tm[im] / VSIZE2; alm = shtns->blm + im*(2*LMAX -m+MRES); Q k = ((l*VSIZE2)>>1)*2; // k must be even here. 
Q do { // compute symmetric and antisymmetric parts, and reorganize data. Q double an, bn, ani, bni, bs, as, bsi, asi, t; 3 double sina = st[k]; double sinb = st[k+1]; Q ani = BrF[im*m_inc + k*k_inc]; bni = BrF[im*m_inc + k*k_inc +1]; // north Q an = BrF[(NPHI-im)*m_inc + k*k_inc]; bn = BrF[(NPHI-im)*m_inc + k*k_inc +1]; Q t = ani-an; an += ani; ani = bn-bni; bn += bni; bni = t; 3 an *= sina; ani*= sina; bn *= sinb; bni *= sinb; Q bsi = BrF[im*m_inc + (NLAT-2 -k)*k_inc]; asi = BrF[im*m_inc + (NLAT-2-k)*k_inc + 1]; // south Q bs = BrF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc]; as = BrF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1]; Q t = bsi-bs; bs += bsi; bsi = as-asi; as += asi; asi = t; 3 as *= sina; asi*= sina; bs *= sinb; bsi *= sinb; Q rer[k] = an+as; rei[k] = ani+asi; rer[k+1] = bn+bs; rei[k+1] = bni+bsi; Q ror[k] = an-as; roi[k] = ani-asi; ror[k+1] = bn-bs; roi[k+1] = bni-bsi; Q k+=2; Q } while (k<nk*VSIZE2); V k = ((l*VSIZE2)>>1)*2; // k must be even here. V do { // compute symmetric and antisymmetric parts, and reorganize data. V double an, bn, ani, bni, bs, as, bsi, asi, t; V ani = BtF[im*m_inc + k*k_inc]; bni = BtF[im*m_inc + k*k_inc +1]; // north V an = BtF[(NPHI-im)*m_inc + k*k_inc]; bn = BtF[(NPHI-im)*m_inc + k*k_inc +1]; V t = ani-an; an += ani; ani = bn-bni; bn += bni; bni = t; V bsi = BtF[im*m_inc + (NLAT-2 -k)*k_inc]; asi = BtF[im*m_inc + (NLAT-2-k)*k_inc + 1]; // south V bs = BtF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc]; as = BtF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1]; V t = bsi-bs; bs += bsi; bsi = as-asi; as += asi; asi = t; V ter[k] = an+as; tei[k] = ani+asi; ter[k+1] = bn+bs; tei[k+1] = bni+bsi; V tor[k] = an-as; toi[k] = ani-asi; tor[k+1] = bn-bs; toi[k+1] = bni-bsi; V k+=2; V } while (k<nk*VSIZE2); V k = ((l*VSIZE2)>>1)*2; // k must be even here. V do { // compute symmetric and antisymmetric parts, and reorganize data. 
V double an, bn, ani, bni, bs, as, bsi, asi, t; V ani = BpF[im*m_inc + k*k_inc]; bni = BpF[im*m_inc + k*k_inc +1]; // north V an = BpF[(NPHI-im)*m_inc + k*k_inc]; bn = BpF[(NPHI-im)*m_inc + k*k_inc +1]; V t = ani-an; an += ani; ani = bn-bni; bn += bni; bni = t; V bsi = BpF[im*m_inc + (NLAT-2 -k)*k_inc]; asi = BpF[im*m_inc + (NLAT-2-k)*k_inc + 1]; // south V bs = BpF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc]; as = BpF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1]; V t = bsi-bs; bs += bsi; bsi = as-asi; as += asi; asi = t; V per[k] = an+as; pei[k] = ani+asi; per[k+1] = bn+bs; pei[k+1] = bni+bsi; V por[k] = an-as; poi[k] = ani-asi; por[k+1] = bn-bs; poi[k+1] = bni-bsi; V k+=2; V } while (k<nk*VSIZE2); V m_1 = 1.0/m; k=l; for (l=0; l<=llim-m; l++) { Q qq[l] = vdup(0.0); V ss[l] = vdup(0.0); tt[l] = vdup(0.0); } do { Q v2d* q = qq; V v2d* s = ss; v2d* t = tt; al = alm; rnd cost[NW], y0[NW], y1[NW]; V rnd st2[NW], dy0[NW], dy1[NW]; Q rnd rerk[NW], reik[NW], rork[NW], roik[NW]; // help the compiler to cache into registers. V rnd terk[NW], teik[NW], tork[NW], toik[NW]; V rnd perk[NW], peik[NW], pork[NW], poik[NW]; for (int j=0; j<NW; ++j) { cost[j] = vread(st, k+j); y0[j] = vall(0.5); V st2[j] = cost[j]*cost[j]*vall(-m_1); V y0[j] *= vall(m); // for the vector transform, compute ylm*m/sint } Q l=m; V l=m-1; long int ny = 0; // exponent to extend double precision range. 
if ((int)llim <= SHT_L_RESCALE_FLY) { do { // sin(theta)^m if (l&1) for (int j=0; j<NW; ++j) y0[j] *= cost[j]; for (int j=0; j<NW; ++j) cost[j] *= cost[j]; } while(l >>= 1); } else { long int nsint = 0; do { // sin(theta)^m (use rescaling to avoid underflow) if (l&1) { for (int j=0; j<NW; ++j) y0[j] *= cost[j]; ny += nsint; if (vlo(y0[0]) < (SHT_ACCURACY+1.0/SHT_SCALE_FACTOR)) { ny--; for (int j=0; j<NW; ++j) y0[j] *= vall(SHT_SCALE_FACTOR); } } for (int j=0; j<NW; ++j) cost[j] *= cost[j]; nsint += nsint; if (vlo(cost[0]) < 1.0/SHT_SCALE_FACTOR) { nsint--; for (int j=0; j<NW; ++j) cost[j] *= vall(SHT_SCALE_FACTOR); } } while(l >>= 1); } for (int j=0; j<NW; ++j) { y0[j] *= vall(al[0]); cost[j] = vread(ct, k+j); V dy0[j] = cost[j]*y0[j]; y1[j] = (vall(al[1])*y0[j]) *cost[j]; V dy1[j] = (vall(al[1])*y0[j]) *(cost[j]*cost[j] + st2[j]); } l=m; al+=2; while ((ny<0) && (l<llim)) { // ylm treated as zero and ignored if ny < 0 for (int j=0; j<NW; ++j) { V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j]; y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j]; } for (int j=0; j<NW; ++j) { V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j]; y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j]; } l+=2; al+=4; if (fabs(vlo(y0[NW-1])) > SHT_ACCURACY*SHT_SCALE_FACTOR + 1.0) { // rescale when value is significant ++ny; for (int j=0; j<NW; ++j) { y0[j] *= vall(1.0/SHT_SCALE_FACTOR); y1[j] *= vall(1.0/SHT_SCALE_FACTOR); V dy0[j] *= vall(1.0/SHT_SCALE_FACTOR); dy1[j] *= vall(1.0/SHT_SCALE_FACTOR); } } } if (ny == 0) { Q q+=(l-m); V s+=(l-m); t+=(l-m); for (int j=0; j<NW; ++j) { // prefetch y0[j] *= vread(wg, k+j); y1[j] *= vread(wg, k+j); // weight appears here (must be after the previous accuracy loop). 
V dy0[j] *= vread(wg, k+j); dy1[j] *= vread(wg, k+j); Q rerk[j] = vread( rer, k+j); reik[j] = vread( rei, k+j); rork[j] = vread( ror, k+j); roik[j] = vread( roi, k+j); V terk[j] = vread( ter, k+j); teik[j] = vread( tei, k+j); tork[j] = vread( tor, k+j); toik[j] = vread( toi, k+j); V perk[j] = vread( per, k+j); peik[j] = vread( pei, k+j); pork[j] = vread( por, k+j); poik[j] = vread( poi, k+j); } while (l<llim) { // compute even and odd parts Q rnd qq0 = y0[0] * rerk[0]; Q rnd qq1 = y0[0] * reik[0]; V rnd ss0 = dy0[0] * tork[0] + y0[0] * peik[0]; V rnd ss1 = dy0[0] * toik[0] - y0[0] * perk[0]; V rnd tt0 = dy0[0] * pork[0] - y0[0] * teik[0]; V rnd tt1 = dy0[0] * poik[0] + y0[0] * terk[0]; Q for (int j=1; j<NW; ++j) qq0 += y0[j] * rerk[j]; // real even Q for (int j=1; j<NW; ++j) qq1 += y0[j] * reik[j]; // imag even V for (int j=1; j<NW; ++j) ss0 += dy0[j] * tork[j] + y0[j] * peik[j]; V for (int j=1; j<NW; ++j) ss1 += dy0[j] * toik[j] - y0[j] * perk[j]; V for (int j=1; j<NW; ++j) tt0 += dy0[j] * pork[j] - y0[j] * teik[j]; V for (int j=1; j<NW; ++j) tt1 += dy0[j] * poik[j] + y0[j] * terk[j]; Q q[0] += v2d_reduce(qq0, qq1); V s[0] += v2d_reduce(ss0, ss1); V t[0] -= v2d_reduce(tt0, tt1); for (int j=0; j<NW; ++j) { V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j]; y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j]; } Q qq0 = y1[0] * rork[0]; Q qq1 = y1[0] * roik[0]; V ss0 = dy1[0] * terk[0] + y1[0] * poik[0]; V ss1 = dy1[0] * teik[0] - y1[0] * pork[0]; V tt0 = dy1[0] * perk[0] - y1[0] * toik[0]; V tt1 = dy1[0] * peik[0] + y1[0] * tork[0]; Q for (int j=1; j<NW; ++j) qq0 += y1[j] * rork[j]; // real odd Q for (int j=1; j<NW; ++j) qq1 += y1[j] * roik[j]; // imag odd V for (int j=1; j<NW; ++j) ss0 += dy1[j] * terk[j] + y1[j] * poik[j]; V for (int j=1; j<NW; ++j) ss1 += dy1[j] * teik[j] - y1[j] * pork[j]; V for (int j=1; j<NW; ++j) tt0 += dy1[j] * perk[j] - y1[j] * toik[j]; V for (int j=1; j<NW; ++j) tt1 += dy1[j] * peik[j] + y1[j] * tork[j]; Q 
q[1] += v2d_reduce(qq0, qq1); V s[1] += v2d_reduce(ss0, ss1); V t[1] -= v2d_reduce(tt0, tt1); Q q+=2; V s+=2; t+=2; for (int j=0; j<NW; ++j) { V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j]; y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j]; } l+=2; al+=4; } if (l==llim) { Q rnd qq0 = y0[0] * rerk[0]; Q rnd qq1 = y0[0] * reik[0]; V rnd ss0 = dy0[0] * tork[0] + y0[0] * peik[0]; V rnd ss1 = dy0[0] * toik[0] - y0[0] * perk[0]; V rnd tt0 = dy0[0] * pork[0] - y0[0] * teik[0]; V rnd tt1 = dy0[0] * poik[0] + y0[0] * terk[0]; Q for (int j=1; j<NW; ++j) qq0 += y0[j] * rerk[j]; // real even Q for (int j=1; j<NW; ++j) qq1 += y0[j] * reik[j]; // imag even V for (int j=1; j<NW; ++j) ss0 += dy0[j] * tork[j] + y0[j] * peik[j]; V for (int j=1; j<NW; ++j) ss1 += dy0[j] * toik[j] - y0[j] * perk[j]; V for (int j=1; j<NW; ++j) tt0 += dy0[j] * pork[j] - y0[j] * teik[j]; V for (int j=1; j<NW; ++j) tt1 += dy0[j] * poik[j] + y0[j] * terk[j]; Q q[0] += v2d_reduce(qq0, qq1); V s[0] += v2d_reduce(ss0, ss1); V t[0] -= v2d_reduce(tt0, tt1); } } k+=NW; } while (k < nk); l = LiM(shtns, m, im); Q v2d *Ql = (v2d*) &Qlm[l]; V v2d *Sl = (v2d*) &Slm[l]; V v2d *Tl = (v2d*) &Tlm[l]; for (l=0; l<=llim-m; ++l) { QX Ql[l] = qq[l]; 3 Ql[l] = qq[l] * vdup(m_1); V Sl[l] = ss[l] * vdup(l_2[l+m]); V Tl[l] = tt[l] * vdup(l_2[l+m]); } #ifdef SHT_VAR_LTR for (l=llim+1-m; l<=LMAX-m; ++l) { Q Ql[l] = vdup(0.0); V Sl[l] = vdup(0.0); Tl[l] = vdup(0.0); } #endif } #endif } static QX void GEN3(spat_to_SH_mic,NWAY,SUFFIX)(shtns_cfg shtns, double *Vr, cplx *Qlm, long int llim) { VX void GEN3(spat_to_SHsphtor_mic,NWAY,SUFFIX)(shtns_cfg shtns, double *Vt, double *Vp, cplx *Slm, cplx *Tlm, long int llim) { 3 void GEN3(spat_to_SHqst_mic,NWAY,SUFFIX)(shtns_cfg shtns, double *Vr, double *Vt, double *Vp, cplx *Qlm, cplx *Slm, cplx *Tlm, long int llim) { Q double *BrF; // contains the Fourier transformed data V double *BtF, *BpF; // contains the Fourier transformed data unsigned imlim=0; Q BrF 
= Vr; V BtF = Vt; BpF = Vp; #ifndef SHT_AXISYM imlim = MTR; #ifdef SHT_VAR_LTR if (imlim*MRES > (unsigned) llim) imlim = ((unsigned) llim)/MRES; // 32bit mul and div should be faster #endif if (shtns->fftc_mode >= 0) { if (shtns->fftc_mode == 0) { // in-place Q fftw_execute_dft(shtns->fftc,(cplx*)BrF, (cplx*)BrF); V fftw_execute_dft(shtns->fftc,(cplx*)BtF, (cplx*)BtF); V fftw_execute_dft(shtns->fftc,(cplx*)BpF, (cplx*)BpF); } else { // alloc memory for the transpose FFT unsigned long nv = shtns->nspat; QX BrF = (double*) VMALLOC( nv * sizeof(double) ); VX BtF = (double*) VMALLOC( 2*nv * sizeof(double) ); VX BpF = BtF + nv; 3 BrF = (double*) VMALLOC( 3*nv * sizeof(double) ); 3 BtF = BrF + nv; BpF = BtF + nv; Q fftw_execute_split_dft(shtns->fftc, Vr+NPHI, Vr, BrF+1, BrF); V fftw_execute_split_dft(shtns->fftc, Vt+NPHI, Vt, BtF+1, BtF); V fftw_execute_split_dft(shtns->fftc, Vp+NPHI, Vp, BpF+1, BpF); } } #endif imlim += 1; #pragma omp parallel num_threads(shtns->nthreads) { QX GEN3(_an1,NWAY,SUFFIX)(shtns, BrF, Qlm, llim, imlim); VX GEN3(_an2,NWAY,SUFFIX)(shtns, BtF, BpF, Slm, Tlm, llim, imlim); 3 GEN3(_an3,NWAY,SUFFIX)(shtns, BrF, BtF, BpF, Qlm, Slm, Tlm, llim, imlim); } #ifndef SHT_AXISYM if (shtns->fftc_mode > 0) { // free memory Q VFREE(BrF); VX VFREE(BtF); // this frees also BpF. } #endif }
DRB060-matrixmultiply-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Classic i-k-j matrix multiplication */ #define N 100 #define M 100 #define K 100 #include <omp.h> double a[100][100]; double b[100][100]; double c[100][100]; int init() { int i; int j; int k; for (i = 0; i <= 99; i += 1) { for (k = 0; k <= 99; k += 1) { for (j = 0; j <= 99; j += 1) { c[i][j] = (i * j); a[i][k] = (i * j); b[k][j] = (i * j); } } } return 0; } int mmm() { int i; int j; int k; #pragma omp parallel for private (i,j,k) for (i = 0; i <= 99; i += 1) { for (k = 0; k <= 99; k += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { c[i][j] = c[i][j] + a[i][k] * b[k][j]; } } } return 0; } int print() { int i; int j; int k; for (i = 0; i <= 99; i += 1) { for (k = 0; k <= 99; k += 1) { for (j = 0; j <= 99; j += 1) { printf("%lf %lf %lf\n",c[i][j],a[i][k],b[k][j]); } } } return 0; } int main() { init(); mmm(); print(); return 0; }
rose_slowInput.c
#include <omp.h>

typedef double real8;

/************************************************************************
 * Function : StressZero
 *
 * Purpose  : Zero the six stress components of every list entry whose
 *            zone has a zero shear modulus, whose fun2j value is below
 *            the stresscut threshold, or whose vc value has reached
 *            eosvmax.  Afterwards, squash any component whose magnitude
 *            is below stresscut * 1e-20 to exactly zero so that later
 *            arithmetic cannot underflow.
 *
 *            zoneset[i] maps list entry i to its zone index; only
 *            shearMod is indexed by zone -- all other arrays are indexed
 *            by i directly.
 ************************************************************************/
void StressZero(real8 *newSxx, real8 *newSyy, real8 *newSzz,
                real8 *newTxy, real8 *newTxz, real8 *newTyz,
                const real8 *fun2j, const real8 *shearMod,
                real8 eosvmax, real8 stresscut,
                const int *zoneset, const real8 *vc, int length)
{
   int i;
   int index;

   /* This value 1.e-20 is used to prevent underflow.  It is NOT a
      cutoff.  DO NOT TOUCH THIS VALUE. */
   real8 stress2 = stresscut * 1.e-20;
   real8 nstres2 = -stress2;   /* read-only in the loop, safe to share */

#pragma omp parallel for private (index,i) firstprivate (length,stress2)
   for (i = 0; i <= length - 1; i += 1) {
      /* use the private zone index; the original re-read zoneset[i] for the
         shearMod lookup, leaving this assignment a dead store */
      index = zoneset[i];
      if (shearMod[index] == 0.0 || fun2j[i] < stresscut || vc[i] >= eosvmax) {
         newSxx[i] = 0.0;
         newSyy[i] = 0.0;
         newSzz[i] = 0.0;
         newTxy[i] = 0.0;
         newTxz[i] = 0.0;
         newTyz[i] = 0.0;
      }
#if 1
      /* underflow squash: anything with |value| < stress2 becomes exact 0 */
      if (newSxx[i] < stress2 && newSxx[i] > nstres2) newSxx[i] = 0.;
      if (newSyy[i] < stress2 && newSyy[i] > nstres2) newSyy[i] = 0.;
      if (newSzz[i] < stress2 && newSzz[i] > nstres2) newSzz[i] = 0.;
      if (newTxy[i] < stress2 && newTxy[i] > nstres2) newTxy[i] = 0.;
      if (newTxz[i] < stress2 && newTxz[i] > nstres2) newTxz[i] = 0.;
      if (newTyz[i] < stress2 && newTyz[i] > nstres2) newTyz[i] = 0.;
#endif
   }
}
GB_unop__exp_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__exp_fp32_fp32 // op(A') function: GB_unop_tran__exp_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = expf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = expf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = expf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__exp_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = expf (z) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__exp_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__lnot_fp32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_fp32_int32
// op(A') function: GB_tran__lnot_fp32_int32

// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)

// NOTE: the GB_* macros below are the specialization interface consumed by
// the shared transpose template (GB_unaryop_transpose.c); do not restructure.

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass: Cx [p] = (float) !(Ax [p] != 0) for p = 0..anz-1,
// statically partitioned across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_fp32_int32
(
    float *Cx,              // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The loop nest lives in the shared template GB_unaryop_transpose.c,
    // specialized through the GB_* macros defined earlier in this file.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int32_uint64
// op(A') function: GB_tran__lnot_int32_uint64

// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)

// NOTE: the GB_* macros below are the specialization interface consumed by
// the shared transpose template (GB_unaryop_transpose.c); do not restructure.

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass: Cx [p] = (int32_t) !(Ax [p] != 0) for p = 0..anz-1,
// statically partitioned across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_int32_uint64
(
    int32_t *Cx,            // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The loop nest lives in the shared template GB_unaryop_transpose.c,
    // specialized through the GB_* macros defined earlier in this file.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_subassign_01.c
//------------------------------------------------------------------------------
// GB_subassign_01: C(I,J) = scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 01: C(I,J) = scalar ; using S

// M: NULL
// Mask_comp: false
// C_replace: false
// accum: NULL
// A: scalar
// S: constructed

// C: not bitmap

#include "GB_subassign_methods.h"

// NOTE(review): most identifiers used below (nthreads, ntasks, taskid,
// nzombies, pending_sorted, kfirst, klast, pS, pS_end, Si, Svlen, ...)
// are declared inside the GB_* macros from GB_subassign_methods.h; the
// control flow here is inseparable from that macro protocol.

GrB_Info GB_subassign_01
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;      // C must not be bitmap
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 01: C(I,J) = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.

    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.  The traversal of S is identical to the traversal of M in
    // Method 4.

    // Method 01 and Method 03 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    // phase 1 only counts; the insertion happens in phase 2
                    task_pending++ ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =A ): scalar to C, no accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_noaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
Winograd_convolution.c
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <time.h> double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } // F(2x2,3x3) void winograd_GgGt_2x2(float* input, float* output, int K, int C) { int total_filter = K * C; int in_c_stride = 9, in_k_stride = in_c_stride * C; int out_c_stride = 16, out_k_stride = out_c_stride * C; #pragma omp parallel for for (int global_id = 0; global_id < total_filter; global_id++) { int k = global_id / C; int c = global_id % C; float tile[3][3], t_tile[4][3], f_tile[4][4]; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { tile[i][j] = input[in_k_stride * k + in_c_stride * c + 3 * i + j]; } } = // G * g for (int j = 0; j < 3; j++) { t_tile[0][j] = tile[0][j]; t_tile[1][j] = 0.5f * tile[0][j] + 0.5f * tile[1][j] + 0.5f * tile[2][j]; t_tile[2][j] = 0.5f * tile[0][j] - 0.5f * tile[1][j] + 0.5f * tile[2][j]; t_tile[3][j] = tile[2][j]; } // g * Gt for (int i = 0; i < 4; i++) { f_tile[i][0] = t_tile[i][0]; f_tile[i][1] = 0.5f * t_tile[i][0] + 0.5f * t_tile[i][1] + 0.5f * t_tile[i][2]; f_tile[i][2] = 0.5f * t_tile[i][0] - 0.5f * t_tile[i][1] + 0.5f * t_tile[i][2]; f_tile[i][3] = t_tile[i][2]; } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { output[out_k_stride * k + out_c_stride * c + 4 * i + j] = f_tile[i][j]; } } } } void winograd_BtdB_2x2(float* input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 16 * C, out_c_stride = tile_n * tile_n * 16; int tilei_stride = tile_n * 16, tilej_stride = 16; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = 
global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[4][4], t_tile[4][4]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int x = 2 * tile_i + i; int y = 2 * tile_j + j; if (x >= map_size || y >= map_size) { tile[i][j] = 0; continue; } tile[i][j] = input[n * in_n_stride + c * in_c_stride + x * x_stride + y * y_stride]; } } // const float Bt[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, 0.0f, -1.0f} // } // Bt * d for (int j = 0; j < 4; j++) { t_tile[0][j] = tile[0][j] - tile[2][j]; t_tile[1][j] = tile[1][j] + tile[2][j]; t_tile[2][j] = -tile[1][j] + tile[2][j]; t_tile[3][j] = tile[1][j] - tile[3][j]; } // d * B for (int i = 0; i < 4; i++) { tile[i][0] = t_tile[i][0] - t_tile[i][2]; tile[i][1] = t_tile[i][1] + t_tile[i][2]; tile[i][2] = -t_tile[i][1] + t_tile[i][2]; tile[i][3] = t_tile[i][1] - t_tile[i][3]; } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 4 * i + j] = tile[i][j]; } } } } void winograd_BtdB_padding_2x2(float* input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 16 * C, out_c_stride = tile_n * tile_n * 16; int tilei_stride = tile_n * 16, tilej_stride = 16; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[4][4], 
t_tile[4][4]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int x = 2 * tile_i + i; int y = 2 * tile_j + j; if (x == 0 || y == 0 || x >= (map_size + 1) || y >= (map_size + 1)) { tile[i][j] = 0; } else { tile[i][j] = input[n * in_n_stride + c * in_c_stride + (x - 1) * x_stride + (y - 1) * y_stride]; } } } // const float Bt[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, 0.0f, -1.0f} // } // Bt * d for (int j = 0; j < 4; j++) { t_tile[0][j] = tile[0][j] - tile[2][j]; t_tile[1][j] = tile[1][j] + tile[2][j]; t_tile[2][j] = -tile[1][j] + tile[2][j]; t_tile[3][j] = tile[1][j] - tile[3][j]; } // d * B for (int i = 0; i < 4; i++) { tile[i][0] = t_tile[i][0] - t_tile[i][2]; tile[i][1] = t_tile[i][1] + t_tile[i][2]; tile[i][2] = -t_tile[i][1] + t_tile[i][2]; tile[i][3] = t_tile[i][1] - t_tile[i][3]; } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 4 * i + j] = tile[i][j]; } } } } void winograd_outerProduct_AtIA_2x2(float* input, float* weight, float* bias, float* output, int batch_size, int K, int tile_n, int out_map_size, int C) { int total_tile = batch_size * K * tile_n * tile_n; int c_stride = tile_n * tile_n * 16, in_n_stride = C * c_stride; int tilei_stride = tile_n * 16, tilej_stride = 16; int w_c_stride = 16, w_k_stride = C * 16; int out_k_stride = out_map_size * out_map_size, out_n_stride = out_k_stride * K; int x_stride = out_map_size, y_stride = 1; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (K * tile_n * tile_n); int remain = global_id % (K * tile_n * tile_n); int k = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[4][4] = {0}; for (int c = 0; c < C; c++) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { tile[i][j] += 
input[n * in_n_stride + c * c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 4 * i + j] * weight[k * w_k_stride + c * w_c_stride + 4 * i + j]; } } } // const float At[2][4] { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, -1.0f} // } float t_tile[2][4], f_tile[2][2]; // At * I for (int j = 0; j < 4; j++) { t_tile[0][j] = tile[0][j] + tile[1][j] + tile[2][j]; t_tile[1][j] = tile[1][j] - tile[2][j] - tile[3][j]; } // I * A for (int i = 0; i < 2; i++) { f_tile[i][0] = t_tile[i][0] + t_tile[i][1] + t_tile[i][2]; f_tile[i][1] = t_tile[i][1] - t_tile[i][2] - t_tile[i][3]; } // bias for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { f_tile[i][j] += bias[k]; } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { int x = 2 * tile_i + i; int y = 2 * tile_j + j; if (x >= out_map_size || y >= out_map_size) { continue; } output[n * out_n_stride + k * out_k_stride + x * x_stride + y * y_stride] = f_tile[i][j]; } } } } void winograd_convolution_2x2(float* input, /* NxCxHxW */ float* weight, /* KxCx3x3 */ float* bias, /* K */ float* my_res, /* NxKxH'xW'*/ int batch_size, int C, int K, int map_size, int padding) { // filter transformation float* trans_filter = (float*)malloc(K * C * 16 * sizeof(float)); // transformed filters if (trans_filter == NULL) { printf("bad malloc trans_filter\n"); } winograd_GgGt_2x2(weight, trans_filter, K, C); int out_map_size = (map_size + padding * 2) - 2; // kernel size = 3, stride = 1 in Winograd algorithm int tile_n = (out_map_size + 1) / 2; float* trans_input = (float*)malloc(batch_size * tile_n * tile_n * C * 16 * sizeof(float)); // transformed input if (trans_input == NULL) { printf("bad malloc trans_input\n"); } // input transformation if (padding == 0) { winograd_BtdB_2x2(input, trans_input, batch_size, C, tile_n, map_size); } else if (padding == 1) { winograd_BtdB_padding_2x2(input, trans_input, batch_size, C, tile_n, map_size); } // element-wise multiplication & output transformation 
winograd_outerProduct_AtIA_2x2(trans_input, trans_filter, bias, my_res, batch_size, K, tile_n, out_map_size, C); free(trans_input); free(trans_filter); return; } // F(4x4,3x3) void winograd_GgGt_4x4(float* input, float* output, int K, int C) { int total_filter = K * C; int in_c_stride = 9, in_k_stride = in_c_stride * C; int out_c_stride = 36, out_k_stride = out_c_stride * C; #pragma omp parallel for for (int global_id = 0; global_id < total_filter; global_id++) { int k = global_id / C; int c = global_id % C; float tile[3][3], t_tile[6][3], f_tile[6][6]; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { tile[i][j] = input[in_k_stride * k + in_c_stride * c + 3 * i + j]; } } // const float G[6][3] = { // {0.25f, 0.0f, 0.0f}, // {-1.0f/6, -1.0f/6, -1.0f/6}, // {-1.0f/6, 1.0f/6, -1.0f/6}, // {1.0f/24, 1.0f/12, 1.0f/6}, // {1.0f/24, -1.0f/12, 1.0f/6}, // {0.0f, 0.0f, 1.0f} // } // G * g for (int j = 0; j < 3; j++) { t_tile[0][j] = 0.25f * tile[0][j]; t_tile[1][j] = -1.0f / 6 * tile[0][j] - 1.0f / 6 * tile[1][j] - 1.0f / 6 * tile[2][j]; t_tile[2][j] = -1.0f / 6 * tile[0][j] + 1.0f / 6 * tile[1][j] - 1.0f / 6 * tile[2][j]; t_tile[3][j] = 1.0f / 24 * tile[0][j] + 1.0f / 12 * tile[1][j] + 1.0f / 6 * tile[2][j]; t_tile[4][j] = 1.0f / 24 * tile[0][j] - 1.0f / 12 * tile[1][j] + 1.0f / 6 * tile[2][j]; t_tile[5][j] = tile[2][j]; } // g * Gt for (int i = 0; i < 6; i++) { f_tile[i][0] = 0.25f * t_tile[i][0]; f_tile[i][1] = -1.0f / 6 * t_tile[i][0] - 1.0f / 6 * t_tile[i][1] - 1.0f / 6 * t_tile[i][2]; f_tile[i][2] = -1.0f / 6 * t_tile[i][0] + 1.0f / 6 * t_tile[i][1] - 1.0f / 6 * t_tile[i][2]; f_tile[i][3] = 1.0f / 24 * t_tile[i][0] + 1.0f / 12 * t_tile[i][1] + 1.0f / 6 * t_tile[i][2]; f_tile[i][4] = 1.0f / 24 * t_tile[i][0] - 1.0f / 12 * t_tile[i][1] + 1.0f / 6 * t_tile[i][2]; f_tile[i][5] = t_tile[i][2]; } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { output[out_k_stride * k + out_c_stride * c + 6 * i + j] = f_tile[i][j]; } } } } void winograd_BtdB_4x4(float* 
input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 36 * C, out_c_stride = tile_n * tile_n * 36; int tilei_stride = tile_n * 36, tilej_stride = 36; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[6][6], t_tile[6][6]; for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { int x = 4 * tile_i + i; int y = 4 * tile_j + j; if (x >= map_size || y >= map_size) { tile[i][j] = 0; continue; } tile[i][j] = input[n * in_n_stride + c * in_c_stride + x * x_stride + y * y_stride]; } } // const float Bt[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f, -4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f, -1.0f, 1.0f, 0.0f}, // {0.0f, -2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f, -2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f, -5.0f, 0.0f, 1.0f} // } // Bt * d for (int j = 0; j < 6; j++) { t_tile[0][j] = 4.0f * tile[0][j] - 5.0f * tile[2][j] + tile[4][j]; t_tile[1][j] = -4.0f * tile[1][j] - 4.0f * tile[2][j] + tile[3][j] + tile[4][j]; t_tile[2][j] = 4.0f * tile[1][j] - 4.0f * tile[2][j] - tile[3][j] + tile[4][j]; t_tile[3][j] = -2.0f * tile[1][j] - tile[2][j] + 2.0f * tile[3][j] + tile[4][j]; t_tile[4][j] = 2.0f * tile[1][j] - tile[2][j] - 2.0f * tile[3][j] + tile[4][j]; t_tile[5][j] = 4.0f * tile[1][j] - 5.0f * tile[3][j] + tile[5][j]; } // d * B for (int i = 0; i < 6; i++) { tile[i][0] = 4.0f * t_tile[i][0] - 5.0f * t_tile[i][2] + t_tile[i][4]; tile[i][1] = -4.0f * t_tile[i][1] - 4.0f * t_tile[i][2] + t_tile[i][3] + t_tile[i][4]; tile[i][2] = 4.0f * t_tile[i][1] - 4.0f 
* t_tile[i][2] - t_tile[i][3] + t_tile[i][4]; tile[i][3] = -2.0f * t_tile[i][1] - t_tile[i][2] + 2.0f * t_tile[i][3] + t_tile[i][4]; tile[i][4] = 2.0f * t_tile[i][1] - t_tile[i][2] - 2.0f * t_tile[i][3] + t_tile[i][4]; tile[i][5] = 4.0f * t_tile[i][1] - 5.0f * t_tile[i][3] + t_tile[i][5]; } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 6 * i + j] = tile[i][j]; } } } } void winograd_BtdB_padding_4x4(float* input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 36 * C, out_c_stride = tile_n * tile_n * 36; int tilei_stride = tile_n * 36, tilej_stride = 36; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[6][6], t_tile[6][6]; for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { int x = 4 * tile_i + i; int y = 4 * tile_j + j; if (x == 0 || y == 0 || x >= (map_size + 1) || y >= (map_size + 1)) { tile[i][j] = 0; } else { tile[i][j] = input[n * in_n_stride + c * in_c_stride + (x - 1) * x_stride + (y - 1) * y_stride]; } } } // const float Bt[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f, -4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f, -1.0f, 1.0f, 0.0f}, // {0.0f, -2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f, -2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f, -5.0f, 0.0f, 1.0f} // } // Bt * d for (int j = 0; j < 6; j++) { t_tile[0][j] = 4.0f * tile[0][j] - 5.0f * tile[2][j] + tile[4][j]; t_tile[1][j] = -4.0f * tile[1][j] - 4.0f * tile[2][j] + tile[3][j] + 
tile[4][j]; t_tile[2][j] = 4.0f * tile[1][j] - 4.0f * tile[2][j] - tile[3][j] + tile[4][j]; t_tile[3][j] = -2.0f * tile[1][j] - tile[2][j] + 2.0f * tile[3][j] + tile[4][j]; t_tile[4][j] = 2.0f * tile[1][j] - tile[2][j] - 2.0f * tile[3][j] + tile[4][j]; t_tile[5][j] = 4.0f * tile[1][j] - 5.0f * tile[3][j] + tile[5][j]; } // d * B for (int i = 0; i < 6; i++) { tile[i][0] = 4.0f * t_tile[i][0] - 5.0f * t_tile[i][2] + t_tile[i][4]; tile[i][1] = -4.0f * t_tile[i][1] - 4.0f * t_tile[i][2] + t_tile[i][3] + t_tile[i][4]; tile[i][2] = 4.0f * t_tile[i][1] - 4.0f * t_tile[i][2] - t_tile[i][3] + t_tile[i][4]; tile[i][3] = -2.0f * t_tile[i][1] - t_tile[i][2] + 2.0f * t_tile[i][3] + t_tile[i][4]; tile[i][4] = 2.0f * t_tile[i][1] - t_tile[i][2] - 2.0f * t_tile[i][3] + t_tile[i][4]; tile[i][5] = 4.0f * t_tile[i][1] - 5.0f * t_tile[i][3] + t_tile[i][5]; } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 6 * i + j] = tile[i][j]; } } } } void winograd_outerProduct_AtIA_4x4(float* input, float* weight, float* bias, float* output, int batch_size, int K, int tile_n, int out_map_size, int C) { int total_tile = batch_size * K * tile_n * tile_n; int c_stride = tile_n * tile_n * 36, in_n_stride = C * c_stride; int tilei_stride = tile_n * 36, tilej_stride = 36; int w_c_stride = 36, w_k_stride = C * 36; int out_k_stride = out_map_size * out_map_size, out_n_stride = out_k_stride * K; int x_stride = out_map_size, y_stride = 1; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (K * tile_n * tile_n); int remain = global_id % (K * tile_n * tile_n); int k = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[6][6] = {0}; for (int c = 0; c < C; c++) { for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { tile[i][j] += input[n * in_n_stride + c * 
c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 6 * i + j] * weight[k * w_k_stride + c * w_c_stride + 6 * i + j]; } } } // const float At[4][6] { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // } float t_tile[4][6], f_tile[4][4]; // At * I for (int j = 0; j < 6; j++) { t_tile[0][j] = tile[0][j] + tile[1][j] + tile[2][j] + tile[3][j] + tile[4][j]; t_tile[1][j] = tile[1][j] - tile[2][j] + 2.0f * tile[3][j] - 2.0f * tile[4][j]; t_tile[2][j] = tile[1][j] + tile[2][j] + 4.0f * tile[3][j] + 4.0f * tile[4][j]; t_tile[3][j] = tile[1][j] - tile[2][j] + 8.0f * tile[3][j] - 8.0f * tile[4][j] + tile[5][j]; } // I * A for (int i = 0; i < 4; i++) { f_tile[i][0] = t_tile[i][0] + t_tile[i][1] + t_tile[i][2] + t_tile[i][3] + t_tile[i][4]; f_tile[i][1] = t_tile[i][1] - t_tile[i][2] + 2.0f * t_tile[i][3] - 2.0f * t_tile[i][4]; f_tile[i][2] = t_tile[i][1] + t_tile[i][2] + 4.0f * t_tile[i][3] + 4.0f * t_tile[i][4]; f_tile[i][3] = t_tile[i][1] - t_tile[i][2] + 8.0f * t_tile[i][3] - 8.0f * t_tile[i][4] + t_tile[i][5]; } // bias for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { f_tile[i][j] += bias[k]; } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int x = 4 * tile_i + i; int y = 4 * tile_j + j; if (x >= out_map_size || y >= out_map_size) { continue; } output[n * out_n_stride + k * out_k_stride + x * x_stride + y * y_stride] = f_tile[i][j]; } } } } void winograd_convolution_4x4(float* input, /* NxCxHxW */ float* weight, /* KxCx3x3 */ float* bias, /* K */ float* my_res, /* NxKxH'xW'*/ int batch_size, int C, int K, int map_size, int padding) { // filter transformation float* trans_filter = (float*)malloc(K * C * 36 * sizeof(float)); // transformed filters if (trans_filter == NULL) { printf("bad malloc trans_filter\n"); } winograd_GgGt_4x4(weight, trans_filter, K, C); int out_map_size = (map_size + padding * 2) - 2; // kernel size = 
3, stride = 1 in Winograd algorithm int tile_n = (out_map_size + 3) / 4; float* trans_input = (float*)malloc(batch_size * tile_n * tile_n * C * 36 * sizeof(float)); // transformed input if (trans_input == NULL) { printf("bad malloc trans_input\n"); } // input transformation if (padding == 0) { winograd_BtdB_4x4(input, trans_input, batch_size, C, tile_n, map_size); } else if (padding == 1) { winograd_BtdB_padding_4x4(input, trans_input, batch_size, C, tile_n, map_size); } // element-wise multiplication & output transformation winograd_outerProduct_AtIA_4x4(trans_input, trans_filter, bias, my_res, batch_size, K, tile_n, out_map_size, C); free(trans_input); free(trans_filter); return; } // F(6x6,3x3) void winograd_GgGt_6x6(float* input, float* output, int K, int C) { int total_filter = K * C; int in_c_stride = 9, in_k_stride = in_c_stride * C; int out_c_stride = 64, out_k_stride = out_c_stride * C; #pragma omp parallel for for (int global_id = 0; global_id < total_filter; global_id++) { int k = global_id / C; int c = global_id % C; float tile[3][3], t_tile[8][3], f_tile[8][8]; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { tile[i][j] = input[in_k_stride * k + in_c_stride * c + 3 * i + j]; } } // const float G[8][3] = { // { 1.0f, 0.0f, 0.0f}, // {-2.0f/9, -2.0f/9, -2.0f/9}, // {-2.0f/9, 2.0f/9, -2.0f/9}, // {1.0f/90, 1.0f/45, 2.0f/45}, // {1.0f/90, -1.0f/45, 2.0f/45}, // {1.0f/45, 1.0f/90, 1.0f/180}, // {1.0f/45, -1.0f/90, 1.0f/180}, // { 0.0f, 0.0f, 1.0f} // }; // G * g for (int j = 0; j < 3; j++) { t_tile[0][j] = tile[0][j]; t_tile[1][j] = -2.0f / 9 * tile[0][j] - 2.0f / 9 * tile[1][j] - 2.0f / 9 * tile[2][j]; t_tile[2][j] = -2.0f / 9 * tile[0][j] + 2.0f / 9 * tile[1][j] - 2.0f / 9 * tile[2][j]; t_tile[3][j] = 1.0f / 90 * tile[0][j] + 1.0f / 45 * tile[1][j] + 2.0f / 45 * tile[2][j]; t_tile[4][j] = 1.0f / 90 * tile[0][j] - 1.0f / 45 * tile[1][j] + 2.0f / 45 * tile[2][j]; t_tile[5][j] = 1.0f / 45 * tile[0][j] + 1.0f / 90 * tile[1][j] + 1.0f / 180 * 
tile[2][j]; t_tile[6][j] = 1.0f / 45 * tile[0][j] - 1.0f / 90 * tile[1][j] + 1.0f / 180 * tile[2][j]; t_tile[7][j] = tile[2][j]; } // g * Gt for (int i = 0; i < 8; i++) { f_tile[i][0] = t_tile[i][0]; f_tile[i][1] = -2.0f / 9 * t_tile[i][0] - 2.0f / 9 * t_tile[i][1] - 2.0f / 9 * t_tile[i][2]; f_tile[i][2] = -2.0f / 9 * t_tile[i][0] + 2.0f / 9 * t_tile[i][1] - 2.0f / 9 * t_tile[i][2]; f_tile[i][3] = 1.0f / 90 * t_tile[i][0] + 1.0f / 45 * t_tile[i][1] + 2.0f / 45 * t_tile[i][2]; f_tile[i][4] = 1.0f / 90 * t_tile[i][0] - 1.0f / 45 * t_tile[i][1] + 2.0f / 45 * t_tile[i][2]; f_tile[i][5] = 1.0f / 45 * t_tile[i][0] + 1.0f / 90 * t_tile[i][1] + 1.0f / 180 * t_tile[i][2]; f_tile[i][6] = 1.0f / 45 * t_tile[i][0] - 1.0f / 90 * t_tile[i][1] + 1.0f / 180 * t_tile[i][2]; f_tile[i][7] = t_tile[i][2]; } for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { output[out_k_stride * k + out_c_stride * c + 8 * i + j] = f_tile[i][j]; } } } } void winograd_BtdB_6x6(float* input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 64 * C, out_c_stride = tile_n * tile_n * 64; int tilei_stride = tile_n * 64, tilej_stride = 64; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[8][8], t_tile[8][8]; for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { int x = 6 * tile_i + i; int y = 6 * tile_j + j; if (x >= map_size || y >= map_size) { tile[i][j] = 0; continue; } tile[i][j] = input[n * in_n_stride + c * in_c_stride + x * x_stride + y * y_stride]; } } // const float Bt[8][8] = { // {1.0f, 0.0f, -5.25f, 
0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // Bt * d for (int j = 0; j < 8; j++) { t_tile[0][j] = tile[0][j] - 5.25f * tile[2][j] + 5.25 * tile[4][j] - tile[6][j]; t_tile[1][j] = tile[1][j] + tile[2][j] - 4.25f * tile[3][j] - 4.25f * tile[4][j] + tile[5][j] + tile[6][j]; t_tile[2][j] = -tile[1][j] + tile[2][j] + 4.25f * tile[3][j] - 4.25f * tile[4][j] - tile[5][j] + tile[6][j]; t_tile[3][j] = 0.5f * tile[1][j] + 0.25f * tile[2][j] - 2.5f * tile[3][j] - 1.25f * tile[4][j] + 2.0f * tile[5][j] + tile[6][j]; t_tile[4][j] = -0.5f * tile[1][j] + 0.25f * tile[2][j] + 2.5f * tile[3][j] - 1.25f * tile[4][j] - 2.0f * tile[5][j] + tile[6][j]; t_tile[5][j] = 2.0f * tile[1][j] + 4.0f * tile[2][j] - 2.5f * tile[3][j] - 5.0f * tile[4][j] + 0.5f * tile[5][j] + tile[6][j]; t_tile[6][j] = -2.0f * tile[1][j] + 4.0f * tile[2][j] + 2.5f * tile[3][j] - 5.0f * tile[4][j] - 0.5f * tile[5][j] + tile[6][j]; t_tile[7][j] = -tile[0][j] + 5.25f * tile[2][j] - 5.25 * tile[4][j] + tile[6][j]; } // d * B for (int i = 0; i < 8; i++) { tile[i][0] = t_tile[i][0] - 5.25f * t_tile[i][2] + 5.25 * t_tile[i][4] - t_tile[i][6]; tile[i][1] = t_tile[i][1] + t_tile[i][2] - 4.25f * t_tile[i][3] - 4.25f * t_tile[i][4] + t_tile[i][5] + t_tile[i][6]; tile[i][2] = -t_tile[i][1] + t_tile[i][2] + 4.25f * t_tile[i][3] - 4.25f * t_tile[i][4] - t_tile[i][5] + t_tile[i][6]; tile[i][3] = 0.5f * t_tile[i][1] + 0.25f * t_tile[i][2] - 2.5f * t_tile[i][3] - 1.25f * t_tile[i][4] + 2.0f * t_tile[i][5] + t_tile[i][6]; tile[i][4] = -0.5f * t_tile[i][1] + 0.25f * t_tile[i][2] + 2.5f * t_tile[i][3] - 1.25f * t_tile[i][4] - 
2.0f * t_tile[i][5] + t_tile[i][6]; tile[i][5] = 2.0f * t_tile[i][1] + 4.0f * t_tile[i][2] - 2.5f * t_tile[i][3] - 5.0f * t_tile[i][4] + 0.5f * t_tile[i][5] + t_tile[i][6]; tile[i][6] = -2.0f * t_tile[i][1] + 4.0f * t_tile[i][2] + 2.5f * t_tile[i][3] - 5.0f * t_tile[i][4] - 0.5f * t_tile[i][5] + t_tile[i][6]; tile[i][7] = -t_tile[i][0] + 5.25f * t_tile[i][2] - 5.25 * t_tile[i][4] + t_tile[i][6]; } for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 8 * i + j] = tile[i][j]; } } } } void winograd_BtdB_padding_6x6(float* input, float* output, int batch_size, int C, int tile_n, int map_size) { int total_tile = batch_size * C * tile_n * tile_n; int in_n_stride = map_size * map_size * C, in_c_stride = map_size * map_size, x_stride = map_size, y_stride = 1; int out_n_stride = tile_n * tile_n * 64 * C, out_c_stride = tile_n * tile_n * 64; int tilei_stride = tile_n * 64, tilej_stride = 64; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (C * tile_n * tile_n); int remain = global_id % (C * tile_n * tile_n); int c = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[8][8], t_tile[8][8]; for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { int x = 6 * tile_i + i; int y = 6 * tile_j + j; if (x == 0 || y == 0 || x >= (map_size + 1) || y >= (map_size + 1)) { tile[i][j] = 0; } else { tile[i][j] = input[n * in_n_stride + c * in_c_stride + (x - 1) * x_stride + (y - 1) * y_stride]; } } } // const float Bt[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // {0.0f, 2.0f, 4.00f, 
-2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // Bt * d for (int j = 0; j < 8; j++) { t_tile[0][j] = tile[0][j] - 5.25f * tile[2][j] + 5.25 * tile[4][j] - tile[6][j]; t_tile[1][j] = tile[1][j] + tile[2][j] - 4.25f * tile[3][j] - 4.25f * tile[4][j] + tile[5][j] + tile[6][j]; t_tile[2][j] = -tile[1][j] + tile[2][j] + 4.25f * tile[3][j] - 4.25f * tile[4][j] - tile[5][j] + tile[6][j]; t_tile[3][j] = 0.5f * tile[1][j] + 0.25f * tile[2][j] - 2.5f * tile[3][j] - 1.25f * tile[4][j] + 2.0f * tile[5][j] + tile[6][j]; t_tile[4][j] = -0.5f * tile[1][j] + 0.25f * tile[2][j] + 2.5f * tile[3][j] - 1.25f * tile[4][j] - 2.0f * tile[5][j] + tile[6][j]; t_tile[5][j] = 2.0f * tile[1][j] + 4.0f * tile[2][j] - 2.5f * tile[3][j] - 5.0f * tile[4][j] + 0.5f * tile[5][j] + tile[6][j]; t_tile[6][j] = -2.0f * tile[1][j] + 4.0f * tile[2][j] + 2.5f * tile[3][j] - 5.0f * tile[4][j] - 0.5f * tile[5][j] + tile[6][j]; t_tile[7][j] = -tile[0][j] + 5.25f * tile[2][j] - 5.25 * tile[4][j] + tile[6][j]; } // d * B for (int i = 0; i < 8; i++) { tile[i][0] = t_tile[i][0] - 5.25f * t_tile[i][2] + 5.25 * t_tile[i][4] - t_tile[i][6]; tile[i][1] = t_tile[i][1] + t_tile[i][2] - 4.25f * t_tile[i][3] - 4.25f * t_tile[i][4] + t_tile[i][5] + t_tile[i][6]; tile[i][2] = -t_tile[i][1] + t_tile[i][2] + 4.25f * t_tile[i][3] - 4.25f * t_tile[i][4] - t_tile[i][5] + t_tile[i][6]; tile[i][3] = 0.5f * t_tile[i][1] + 0.25f * t_tile[i][2] - 2.5f * t_tile[i][3] - 1.25f * t_tile[i][4] + 2.0f * t_tile[i][5] + t_tile[i][6]; tile[i][4] = -0.5f * t_tile[i][1] + 0.25f * t_tile[i][2] + 2.5f * t_tile[i][3] - 1.25f * t_tile[i][4] - 2.0f * t_tile[i][5] + t_tile[i][6]; tile[i][5] = 2.0f * t_tile[i][1] + 4.0f * t_tile[i][2] - 2.5f * t_tile[i][3] - 5.0f * t_tile[i][4] + 0.5f * t_tile[i][5] + t_tile[i][6]; tile[i][6] = -2.0f * t_tile[i][1] + 4.0f * t_tile[i][2] + 2.5f * t_tile[i][3] - 5.0f * t_tile[i][4] - 0.5f * t_tile[i][5] + 
t_tile[i][6]; tile[i][7] = -t_tile[i][0] + 5.25f * t_tile[i][2] - 5.25 * t_tile[i][4] + t_tile[i][6]; } for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { output[n * out_n_stride + c * out_c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 8 * i + j] = tile[i][j]; } } } } void winograd_outerProduct_AtIA_6x6(float* input, float* weight, float* bias, float* output, int batch_size, int K, int tile_n, int out_map_size, int C) { int total_tile = batch_size * K * tile_n * tile_n; int c_stride = tile_n * tile_n * 64, in_n_stride = C * c_stride; int tilei_stride = tile_n * 64, tilej_stride = 64; int w_c_stride = 64, w_k_stride = C * 64; int out_k_stride = out_map_size * out_map_size, out_n_stride = out_k_stride * K; int x_stride = out_map_size, y_stride = 1; #pragma omp parallel for for (int global_id = 0; global_id < total_tile; global_id++) { int n = global_id / (K * tile_n * tile_n); int remain = global_id % (K * tile_n * tile_n); int k = remain / (tile_n * tile_n); remain = remain % (tile_n * tile_n); int tile_i = remain / tile_n; int tile_j = remain % tile_n; float tile[8][8] = {0}; for (int c = 0; c < C; c++) { for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { tile[i][j] += input[n * in_n_stride + c * c_stride + tile_i * tilei_stride + tile_j * tilej_stride + 8 * i + j] * weight[k * w_k_stride + c * w_c_stride + 8 * i + j]; } } } // const float At[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; float t_tile[6][8], f_tile[6][6]; // At * I for (int j = 0; j < 8; j++) { t_tile[0][j] = tile[0][j] + tile[1][j] + tile[2][j] + tile[3][j] + tile[4][j] + 32.0f * tile[5][j] + 32.0f * tile[6][j]; t_tile[1][j] = tile[1][j] - tile[2][j] + 2.0f * tile[3][j] - 
2.0f * tile[4][j] + 16.0f * tile[5][j] - 16.0f * tile[6][j]; t_tile[2][j] = tile[1][j] + tile[2][j] + 4.0f * tile[3][j] + 4.0f * tile[4][j] + 8.0f * tile[5][j] + 8.0f * tile[6][j]; t_tile[3][j] = tile[1][j] - tile[2][j] + 8.0f * tile[3][j] - 8.0f * tile[4][j] + 4.0f * tile[5][j] - 4.0f * tile[6][j]; t_tile[4][j] = tile[1][j] + tile[2][j] + 16.0f * tile[3][j] + 16.0f * tile[4][j] + 2.0f * tile[5][j] + 2.0f * tile[6][j]; t_tile[5][j] = tile[1][j] - tile[2][j] + 32.0f * tile[3][j] - 32.0f * tile[4][j] + tile[5][j] - tile[6][j] + tile[7][j]; } // I * A for (int i = 0; i < 6; i++) { f_tile[i][0] = t_tile[i][0] + t_tile[i][1] + t_tile[i][2] + t_tile[i][3] + t_tile[i][4] + 32.0f * t_tile[i][5] + 32.0f * t_tile[i][6]; f_tile[i][1] = t_tile[i][1] - t_tile[i][2] + 2.0f * t_tile[i][3] - 2.0f * t_tile[i][4] + 16.0f * t_tile[i][5] - 16.0f * t_tile[i][6]; f_tile[i][2] = t_tile[i][1] + t_tile[i][2] + 4.0f * t_tile[i][3] + 4.0f * t_tile[i][4] + 8.0f * t_tile[i][5] + 8.0f * t_tile[i][6]; f_tile[i][3] = t_tile[i][1] - t_tile[i][2] + 8.0f * t_tile[i][3] - 8.0f * t_tile[i][4] + 4.0f * t_tile[i][5] - 4.0f * t_tile[i][6]; f_tile[i][4] = t_tile[i][1] + t_tile[i][2] + 16.0f * t_tile[i][3] + 16.0f * t_tile[i][4] + 2.0f * t_tile[i][5] + 2.0f * t_tile[i][6]; f_tile[i][5] = t_tile[i][1] - t_tile[i][2] + 32.0f * t_tile[i][3] - 32.0f * t_tile[i][4] + t_tile[i][5] - t_tile[i][6] + t_tile[i][7]; } // bias for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { f_tile[i][j] += bias[k]; } } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { int x = 6 * tile_i + i; int y = 6 * tile_j + j; if (x >= out_map_size || y >= out_map_size) { continue; } output[n * out_n_stride + k * out_k_stride + x * x_stride + y * y_stride] = f_tile[i][j]; } } } } void winograd_convolution_6x6(float* input, /* NxCxHxW */ float* weight, /* KxCx3x3 */ float* bias, /* K */ float* my_res, /* NxKxH'xW'*/ int batch_size, int C, int K, int map_size, int padding) { // filter transformation float* trans_filter = 
(float*)malloc(K * C * 64 * sizeof(float)); // transformed filters if (trans_filter == NULL) { printf("bad malloc trans_filter\n"); } winograd_GgGt_6x6(weight, trans_filter, K, C); int out_map_size = (map_size + padding * 2) - 2; // kernel size = 3, stride = 1 in Winograd algorithm int tile_n = (out_map_size + 5) / 6; float* trans_input = (float*)malloc(batch_size * tile_n * tile_n * C * 64 * sizeof(float)); // transformed input if (trans_input == NULL) { printf("bad malloc trans_input\n"); } // input transformation if (padding == 0) { winograd_BtdB_6x6(input, trans_input, batch_size, C, tile_n, map_size); } else if (padding == 1) { winograd_BtdB_padding_6x6(input, trans_input, batch_size, C, tile_n, map_size); } // element-wise multiplication & output transformation winograd_outerProduct_AtIA_6x6(trans_input, trans_filter, bias, my_res, batch_size, K, tile_n, out_map_size, C); free(trans_input); free(trans_filter); return; } void init(float* A, int size) { for (int i = 0; i < size; i++) { A[i] = (float)rand() / RAND_MAX; } } int main(int argc, char* argv[]) { if (argc != 7) { printf("usage: ./test < N > < C > < H(W) > < K > <padding(0/1)> < m(2/4/6) > \n"); exit(0); } int batch_size = atoi(argv[1]); int C = atoi(argv[2]); int map_size = atoi(argv[3]); int K = atoi(argv[4]); int padding = atoi(argv[5]); int m = atoi(argv[6]); double t_start, t_end; float* input = (float*)malloc(batch_size * C * map_size * map_size * sizeof(float)); float* weight = (float*)malloc(K * C * 3 * 3 * sizeof(float)); float* bias = (float*)malloc(K * sizeof(float)); float* result = (float*)malloc(batch_size * K * map_size * map_size * sizeof(float)); init(input, batch_size * C * map_size * map_size); init(weight, K * C * 3 * 3); init(bias, K); t_start = rtclock(); switch (m) { case 2: winograd_convolution_2x2(input, weight, bias, result, batch_size, C, K, map_size, padding); break; case 4: winograd_convolution_4x4(input, weight, bias, result, batch_size, C, K, map_size, padding); break; 
case 6: winograd_convolution_6x6(input, weight, bias, result, batch_size, C, K, map_size, padding); break; default: break; } t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); free(input); free(weight); free(bias); free(result); return 0; }
io_test.h
#include <mpi.h> extern int local_cell_blocks; extern int local_edge_blocks; #include "io.h" void TestIO(GRID * g) { // TODO: seed the rngfalse printf("Init write...\n"); io_write_init(g, "temp_netcdf_test_output.cdf"); struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_output1; io_var_t io_test_output1; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_input1; io_var_t io_test_input1; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_output2; io_var_t io_test_output2; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_input2; io_var_t io_test_input2; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_output3; io_var_t io_test_output3; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_input3; io_var_t io_test_input3; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_output4; io_var_t io_test_output4; struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *test_input4; io_var_t io_test_input4; { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_output1 = malloc(24); test_output1->name = "test_output1"; test_output1->loc = 0; test_output1->dim = 3; test_output1->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_output1->data_pointer.p3 + num_blocks * sizeof(char *); char *pos2 = (char *) test_output1->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_output1->data_pointer.p3[b] = (GVAL * *)pos; pos += g->height * sizeof(char *); for (int k = 0; k < g->height; k++) { test_output1->data_pointer.p3[b][k] = (GVAL *) pos2; pos2 += g->blkSize * sizeof(GVAL); for (int c = 0; c < g->blkSize; c++) { test_output1->data_pointer.p3[b][k][c] = (GVAL) 0; } } } } { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_input1 = malloc(24); test_input1->name = "test_input1"; test_input1->loc = 0; test_input1->dim = 3; test_input1->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_input1->data_pointer.p3 + num_blocks * sizeof(char *); char *pos2 = (char *) test_input1->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_input1->data_pointer.p3[b] = (GVAL * *)pos; pos += g->height * sizeof(char *); for (int k = 0; k < g->height; k++) { test_input1->data_pointer.p3[b][k] = (GVAL *) pos2; pos2 += g->blkSize * sizeof(GVAL); for (int c = 0; c < g->blkSize; c++) { test_input1->data_pointer.p3[b][k][c] = (GVAL) 0; } } } } { int num_blocks = local_edge_blocks ? 
local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_output2 = malloc(24); test_output2->name = "test_output2"; test_output2->loc = 1; test_output2->dim = 3; test_output2->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_output2->data_pointer.p3 + num_blocks * sizeof(char *); char *pos2 = (char *) test_output2->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_output2->data_pointer.p3[b] = (GVAL * *)pos; pos += g->height * sizeof(char *); for (int k = 0; k < g->height; k++) { test_output2->data_pointer.p3[b][k] = (GVAL *) pos2; pos2 += g->blkSize * sizeof(GVAL); for (int e = 0; e < g->blkSize; e++) { test_output2->data_pointer.p3[b][k][e] = (GVAL) 0; } } } } { int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_input2 = malloc(24); test_input2->name = "test_input2"; test_input2->loc = 1; test_input2->dim = 3; test_input2->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_input2->data_pointer.p3 + num_blocks * sizeof(char *); char *pos2 = (char *) test_input2->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_input2->data_pointer.p3[b] = (GVAL * *)pos; pos += g->height * sizeof(char *); for (int k = 0; k < g->height; k++) { test_input2->data_pointer.p3[b][k] = (GVAL *) pos2; pos2 += g->blkSize * sizeof(GVAL); for (int e = 0; e < g->blkSize; e++) { test_input2->data_pointer.p3[b][k][e] = (GVAL) 0; } } } } { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_output3 = malloc(24); test_output3->name = "test_output3"; test_output3->loc = 0; test_output3->dim = 2; test_output3->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_output3->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_output3->data_pointer.p2[b] = (GVAL *) pos; pos += g->blkSize * sizeof(GVAL); for (int c = 0; c < g->blkSize; c++) { test_output3->data_pointer.p2[b][c] = (GVAL) 0; } } } { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_input3 = malloc(24); test_input3->name = "test_input3"; test_input3->loc = 0; test_input3->dim = 2; test_input3->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_input3->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_input3->data_pointer.p2[b] = (GVAL *) pos; pos += g->blkSize * sizeof(GVAL); for (int c = 0; c < g->blkSize; c++) { test_input3->data_pointer.p2[b][c] = (GVAL) 0; } } } { int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_output4 = malloc(24); test_output4->name = "test_output4"; test_output4->loc = 1; test_output4->dim = 2; test_output4->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_output4->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_output4->data_pointer.p2[b] = (GVAL *) pos; pos += g->blkSize * sizeof(GVAL); for (int e = 0; e < g->blkSize; e++) { test_output4->data_pointer.p2[b][e] = (GVAL) 0; } } } { int num_blocks = local_edge_blocks ? 
local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); test_input4 = malloc(24); test_input4->name = "test_input4"; test_input4->loc = 1; test_input4->dim = 2; test_input4->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *)); char *pos = (char *) test_input4->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { test_input4->data_pointer.p2[b] = (GVAL *) pos; pos += g->blkSize * sizeof(GVAL); for (int e = 0; e < g->blkSize; e++) { test_input4->data_pointer.p2[b][e] = (GVAL) 0; } } } printf("Generate test values...\n"); { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { test_output1->data_pointer.p3[(block_index)][(height_index)][(cell_index)] = (float) rand(); } } } } { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { test_output3->data_pointer.p2[(block_index)][(cell_index)] = (float) rand(); } } } { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { test_output2->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = (float) rand(); } } } } { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { test_output4->data_pointer.p2[(block_index)][(edge_index)] = (float) rand(); } } } printf("Setup output variable...\n"); io_write_define(g, "test_output1", (GVAL *) test_output1, FLOAT32, GRID_POS_CELL, GRID_DIM_3D, &io_test_output1); io_write_define(g, "test_output2", (GVAL *) test_output2, FLOAT32, GRID_POS_EDGE, GRID_DIM_3D, &io_test_output2); io_write_define(g, "test_output3", (GVAL *) test_output3, FLOAT32, GRID_POS_CELL, GRID_DIM_2D, &io_test_output3); io_write_define(g, "test_output4", (GVAL *) test_output4, FLOAT32, GRID_POS_EDGE, GRID_DIM_2D, &io_test_output4); io_write_registration_complete(g); io_write_announce(g, &io_test_output1); io_write_announce(g, &io_test_output2); io_write_announce(g, &io_test_output3); io_write_announce(g, &io_test_output4); printf("Writing to disk...\n"); io_write_start(g); io_write_finalize(g); printf("Init read...\n"); io_read_init(g, "temp_netcdf_test_output.cdf"); io_read_register(g, "test_output1", (GVAL *) test_input1, FLOAT32, FLOAT32, GRID_POS_CELL, GRID_DIM_3D); io_read_register(g, "test_output2", (GVAL *) test_input2, FLOAT32, FLOAT32, GRID_POS_EDGE, GRID_DIM_3D); io_read_register(g, "test_output3", (GVAL *) test_input3, FLOAT32, FLOAT32, GRID_POS_CELL, GRID_DIM_2D); io_read_register(g, "test_output4", (GVAL *) test_input4, FLOAT32, FLOAT32, GRID_POS_EDGE, GRID_DIM_2D); printf("Reading from disk...\n"); io_read_start(); printf("Comparing initial values with disk values...\n"); int success = 1; size_t total = 0; size_t count = 0; { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if (test_output1->data_pointer.p3[(block_index)][(height_index)][(cell_index)] != test_input1->data_pointer.p3[(block_index)][(height_index)][(cell_index)]) { count++; success = 0; } } } } } printf("%d/%d errors for CELL 3D\n", count, g->height * g->cellCount); total += count; count = 0; { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { if (test_output2->data_pointer.p3[(block_index)][(height_index)][(edge_index)] != test_input2->data_pointer.p3[(block_index)][(height_index)][(edge_index)]) { count++; success = 0; } } } } } printf("%d/%d errors for EDGE 3D\n", count, g->height * g->edgeCount); total += count; count = 0; { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if (test_output3->data_pointer.p2[(block_index)][(cell_index)] != test_input3->data_pointer.p2[(block_index)][(cell_index)]) { count++; success = 0; } } } } printf("%d/%d errors for CELL 2D\n", count, g->cellCount); total += count; count = 0; { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { if (test_output4->data_pointer.p2[(block_index)][(edge_index)] != test_input4->data_pointer.p2[(block_index)][(edge_index)]) { count++; success = 0; } } } } printf("%d/%d errors for EDGE 2D\n", count, g->edgeCount); total += count; count = 0; printf("%d/%d errors total\n", count, g->height * g->cellCount + g->height * g->edgeCount + g->cellCount + g->cellCount); if (success) { printf("\nnetcdf io test\x1B[32m succeded\x1B[0m\n"); } else { printf("\nnetcdf io test\x1B[31m failed\x1B[0m\n"); } }
target_data_array_extension.c
// -------------------------------------------------- // Check extends before // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // -------------------------------------------------- // Check extends after // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // END. 
#include <stdio.h> #define BEFORE 0 #define AFTER 1 #define SIZE 100 #if EXTENDS == BEFORE # define SMALL_BEG (SIZE-2) # define SMALL_END SIZE # define LARGE_BEG 0 # define LARGE_END SIZE #elif EXTENDS == AFTER # define SMALL_BEG 0 # define SMALL_END 2 # define LARGE_BEG 0 # define LARGE_END SIZE #else # error EXTENDS undefined #endif #define SMALL_SIZE (SMALL_END-SMALL_BEG) #define LARGE_SIZE (LARGE_END-LARGE_BEG) #define SMALL SMALL_BEG:SMALL_SIZE #define LARGE LARGE_BEG:LARGE_SIZE int main() { int arr[SIZE]; // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG], SMALL_SIZE * sizeof arr[0]); // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG], LARGE_SIZE * sizeof arr[0]); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[LARGE]) { #pragma omp target data map(present, tofrom: arr[SMALL]) ; } // CHECK: arr is present fprintf(stderr, "arr is present\n"); // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes) // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[SMALL]) { #pragma omp target data map(present, tofrom: arr[LARGE]) ; } // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
matmul-omp2.c
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include <cuda_runtime.h> #include "cublas_v2.h" #ifndef N #define N (1 << 10) #endif #pragma omp declare target #define SM 64 #define NTHRDS7 (1 << 0x7) /* 2^{7} */ #define NTHRDS8 (1 << 0x8) /* 2^{8} */ #define NTHRDS9 (1 << 0x9) /* 2^{9} */ #define LTEAMSD (1 << 0xD) /* 2^{13} */ #define LTEAMSE (1 << 0xE) /* 2^{14} */ #define LTEAMSF (1 << 0xF) /* 2^{15} */ #define LTEAMSG (1 << 020) /* 2^{16} */ #define BLKROW (512) /* 4x number of threads in each team */ #define BLKDIM (16) void gemm_accel_opt2(float *restrict a, float *restrict b, float *restrict c, int n) { /* * - jik-loop * - 2^7 threads per team and 2^13 teams * - collapse(3) * - 4x j-loop unrolling (stride of 1 col ) * - 4x i-loop unrolling (stride of 2^7 rows) * - 4x k-loop unrolling * - rb: 4x data re-use * - ra: 4x data re-use * - register blocking */ #pragma omp target data \ map(to \ : n, a [0:n * n], b [0:n * n]) map(tofrom \ : c [0:n * n]) { #pragma omp target teams num_teams(LTEAMSD) thread_limit(NTHRDS7) \ map(to \ : n, a [0:n * n], b [0:n * n]) map(tofrom \ : c [0:n * n]) default(none) shared(a, b, c, n) #pragma omp distribute parallel for num_threads(NTHRDS7) \ dist_schedule(static, NTHRDS7) collapse(3) default(none) shared(a, b, c, n) for (int j = 0; j < n; j += 4) { /* 4x unrolling */ for (int iblk = 0; iblk < n / BLKROW; ++iblk) { for (int i = 0; i < NTHRDS7; ++i) { /* 4x unrolling */ /* register for c: 4x j-loop * 4x i-loop */ float rc0, rc1, rc2, rc3, rc4, rc5, rc6, rc7, rc8, rc9, rca, rcb, rcc, rcd, rce, rcf; rc0 = c[j * n + iblk * BLKROW + i]; rc1 = c[j * n + iblk * BLKROW + i + NTHRDS7]; rc2 = c[j * n + iblk * BLKROW + i + NTHRDS7 * 2]; rc3 = c[j * n + iblk * BLKROW + i + NTHRDS7 * 3]; rc4 = c[(j + 1) * n + iblk * BLKROW + i]; rc5 = c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7]; rc6 = c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7 * 2]; rc7 = c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7 * 3]; rc8 = 
c[(j + 2) * n + iblk * BLKROW + i]; rc9 = c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7]; rca = c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7 * 2]; rcb = c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7 * 3]; rcc = c[(j + 3) * n + iblk * BLKROW + i]; rcd = c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7]; rce = c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7 * 2]; rcf = c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7 * 3]; for (int k = 0; k < n; k += 4) { /* 4x unrolling */ /* register for b: 4x j-loop * 4x k-loop */ float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7, rb8, rb9, rba, rbb, rbc, rbd, rbe, rbf; rb0 = b[j * n + k]; rb1 = b[j * n + k + 1]; rb2 = b[j * n + k + 2]; rb3 = b[j * n + k + 3]; rb4 = b[(j + 1) * n + k]; rb5 = b[(j + 1) * n + k + 1]; rb6 = b[(j + 1) * n + k + 2]; rb7 = b[(j + 1) * n + k + 3]; rb8 = b[(j + 2) * n + k]; rb9 = b[(j + 2) * n + k + 1]; rba = b[(j + 2) * n + k + 2]; rbb = b[(j + 2) * n + k + 3]; rbc = b[(j + 3) * n + k]; rbd = b[(j + 3) * n + k + 1]; rbe = b[(j + 3) * n + k + 2]; rbf = b[(j + 3) * n + k + 3]; /* register for a: 4x i-loop * 4x k-loop */ float ra0, ra1, ra2, ra3, ra4, ra5, ra6, ra7, ra8, ra9, raa, rab, rac, rad, rae, raf; ra0 = a[k * n + iblk * BLKROW + i]; ra1 = a[k * n + iblk * BLKROW + i + NTHRDS7]; ra2 = a[k * n + iblk * BLKROW + i + NTHRDS7 * 2]; ra3 = a[k * n + iblk * BLKROW + i + NTHRDS7 * 3]; ra4 = a[(k + 1) * n + iblk * BLKROW + i]; ra5 = a[(k + 1) * n + iblk * BLKROW + i + NTHRDS7]; ra6 = a[(k + 1) * n + iblk * BLKROW + i + NTHRDS7 * 2]; ra7 = a[(k + 1) * n + iblk * BLKROW + i + NTHRDS7 * 3]; ra8 = a[(k + 2) * n + iblk * BLKROW + i]; ra9 = a[(k + 2) * n + iblk * BLKROW + i + NTHRDS7]; raa = a[(k + 2) * n + iblk * BLKROW + i + NTHRDS7 * 2]; rab = a[(k + 2) * n + iblk * BLKROW + i + NTHRDS7 * 3]; rac = a[(k + 3) * n + iblk * BLKROW + i]; rad = a[(k + 3) * n + iblk * BLKROW + i + NTHRDS7]; rae = a[(k + 3) * n + iblk * BLKROW + i + NTHRDS7 * 2]; raf = a[(k + 3) * n + iblk * BLKROW + i + NTHRDS7 * 3]; /* * register blocking */ // col 1 of c: 
rc0 += ra0 * rb0; rc0 += ra4 * rb1; rc0 += ra8 * rb2; rc0 += rac * rb3; rc1 += ra1 * rb0; rc1 += ra5 * rb1; rc1 += ra9 * rb2; rc1 += rad * rb3; rc2 += ra2 * rb0; rc2 += ra6 * rb1; rc2 += raa * rb2; rc2 += rae * rb3; rc3 += ra3 * rb0; rc3 += ra7 * rb1; rc3 += rab * rb2; rc3 += raf * rb3; // col 2 of c: rc4 += ra0 * rb4; rc4 += ra4 * rb5; rc4 += ra8 * rb6; rc4 += rac * rb7; rc5 += ra1 * rb4; rc5 += ra5 * rb5; rc5 += ra9 * rb6; rc5 += rad * rb7; rc6 += ra2 * rb4; rc6 += ra6 * rb5; rc6 += raa * rb6; rc6 += rae * rb7; rc7 += ra3 * rb4; rc7 += ra7 * rb5; rc7 += rab * rb6; rc7 += raf * rb7; // col 3 of c: rc8 += ra0 * rb8; rc8 += ra4 * rb9; rc8 += ra8 * rba; rc8 += rac * rbb; rc9 += ra1 * rb8; rc9 += ra5 * rb9; rc9 += ra9 * rba; rc9 += rad * rbb; rca += ra2 * rb8; rca += ra6 * rb9; rca += raa * rba; rca += rae * rbb; rcb += ra3 * rb8; rcb += ra7 * rb9; rcb += rab * rba; rcb += raf * rbb; // col 4 of c: rcc += ra0 * rbc; rcc += ra4 * rbd; rcc += ra8 * rbe; rcc += rac * rbf; rcd += ra1 * rbc; rcd += ra5 * rbd; rcd += ra9 * rbe; rcd += rad * rbf; rce += ra2 * rbc; rce += ra6 * rbd; rce += raa * rbe; rce += rae * rbf; rcf += ra3 * rbc; rcf += ra7 * rbd; rcf += rab * rbe; rcf += raf * rbf; } c[j * n + iblk * BLKROW + i] = rc0; c[j * n + iblk * BLKROW + i + NTHRDS7] = rc1; c[j * n + iblk * BLKROW + i + NTHRDS7 * 2] = rc2; c[j * n + iblk * BLKROW + i + NTHRDS7 * 3] = rc3; c[(j + 1) * n + iblk * BLKROW + i] = rc4; c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7] = rc5; c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7 * 2] = rc6; c[(j + 1) * n + iblk * BLKROW + i + NTHRDS7 * 3] = rc7; c[(j + 2) * n + iblk * BLKROW + i] = rc8; c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7] = rc9; c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7 * 2] = rca; c[(j + 2) * n + iblk * BLKROW + i + NTHRDS7 * 3] = rcb; c[(j + 3) * n + iblk * BLKROW + i] = rcc; c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7] = rcd; c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7 * 2] = rce; c[(j + 3) * n + iblk * BLKROW + i + NTHRDS7 * 3] = rcf; 
} /* end i-loop */ } /* end iblk-loop */ } /* end j-loop */ } } void gemm_cublas(float *restrict a, float *restrict b, float *restrict c, int n) { cublasHandle_t handle; float alfa = 1.0f, beta = 1.0f, *a_dev = NULL, *b_dev = NULL, *c_dev = NULL; /* * cublasSgemm in CUBLAS */ if (CUBLAS_STATUS_SUCCESS != cublasCreate(&handle)) { printf("error: initialization (CUBLAS)\n"); cublasDestroy(handle); exit(EXIT_FAILURE); } if (cudaSuccess != cudaMalloc((void **)&a_dev, sizeof(*a) * n * n) || cudaSuccess != cudaMalloc((void **)&b_dev, sizeof(*b) * n * n) || cudaSuccess != cudaMalloc((void **)&c_dev, sizeof(*c) * n * n)) { printf("error: memory allocation (CUDA)\n"); cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); exit(EXIT_FAILURE); } if (CUBLAS_STATUS_SUCCESS != cublasSetMatrix(n, n, sizeof(*a), a, n, a_dev, n) || CUBLAS_STATUS_SUCCESS != cublasSetMatrix(n, n, sizeof(*b), b, n, b_dev, n) || CUBLAS_STATUS_SUCCESS != cublasSetMatrix(n, n, sizeof(*c), c, n, c_dev, n)) { printf("error: host --> accl (CUBLAS)\n"); cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); exit(EXIT_FAILURE); } if (CUBLAS_STATUS_SUCCESS != cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alfa, a_dev, n, b_dev, n, &beta, c_dev, n)) { printf("error: cublasSgemm (CUBLAS)\n"); cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); exit(EXIT_FAILURE); } if (cudaSuccess != cudaDeviceSynchronize()) { printf("error: device synchronization (CUDA)\n"); cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); exit(EXIT_FAILURE); } if (CUBLAS_STATUS_SUCCESS != cublasGetMatrix(n, n, sizeof(*c), c_dev, n, c, n)) { printf("error: accl --> host (CUBLAS)\n"); cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); exit(EXIT_FAILURE); } cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); cublasDestroy(handle); } static void reorder2(float *restrict a, float *restrict b, int n) { for (int i = 0; i < SM; i++) for 
(int j = 0; j < SM; j++) b[i * SM + j] = a[i * n + j]; } static void kernel(float *restrict a, float *restrict b, float *restrict c, int n) { for (int i = 0; i < SM; i++) { for (int k = 0; k < SM; k++) { for (int j = 0; j < SM; j++) { c[i * n + j] += a[i * n + k] * b[k * SM + j]; } } } } void gemm_accel_opt(float *restrict a, float *restrict b, float *restrict c, int n) { #pragma omp target teams distribute parallel for collapse(3) map(to \ : n, a [0:n * n], b [0:n * n]) map(from \ : c [0:n * n]) schedule(static, 1) for (int i = 0; i < n / SM; i++) { for (int j = 0; j < n / SM; j++) { for (int k = 0; k < n / SM; k++) { float b2[SM * SM]; reorder2(&b[SM * (k * n + j)], b2, n); kernel(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n); } } } } #pragma omp end declare target void gemm_opt(float *restrict a, float *restrict b, float *restrict c, int n) { int bk = n / SM; #pragma omp parallel { float b2[SM * SM]; #pragma omp for collapse(3) for (int i = 0; i < bk; i++) { for (int j = 0; j < bk; j++) { for (int k = 0; k < bk; k++) { reorder2(&b[SM * (k * n + j)], b2, n); kernel(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n); } } } } } void gemm(float *restrict a, float *restrict b, float *restrict c, int n) { int i, j, k; #pragma omp parallel for simd collapse(2) schedule(simd \ : static) for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { float sum = 0.0; for (int k = 0; k < n; ++k) { sum += a[i + k * n] * b[k + j * n]; } c[i * n + j] += sum; } } } int main(int argc, char *argv[]) { int i, n = N, iret = 0; float *a, *b, *c, *g; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); /* * 0. 
prepare x, y, and z * * y := a * x + y (on host) * z := a * x + z (on accel) */ if (NULL == (a = (float *)malloc(sizeof(*a) * n * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (b = (float *)malloc(sizeof(*b) * n * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (c = (float *)malloc(sizeof(*c) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (NULL == (g = (float *)malloc(sizeof(*g) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(a); free(b); free(c); free(g); exit(EXIT_FAILURE); } if (n <= 1024) { clock_gettime(CLOCK_REALTIME, rt + 0); gemm(a, b, c, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("gemm on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n * n * n / (1.0e6 * wt)); } if (n <= 4096) { clock_gettime(CLOCK_REALTIME, rt + 0); gemm_opt(a, b, c, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("gemm_opt on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n * n * n / (1.0e6 * wt)); } #if 0 #pragma omp target teams distribute parallel for map(to \ : a [0:n * n], b [0:n * n]) map(from \ : c [0:n * n]) collapse(2) for(int i = 0; i < n; ++i){ for(int j = 0; j < n; ++j){ float sum = 0.0; for(int k = 0; k < n; ++k){ sum += a[i+k*n]*b[k+j*n]; } c[i*n+j] += sum; } } #endif if (n <= 4096) { clock_gettime(CLOCK_REALTIME, rt + 0); gemm_accel_opt(a, b, c, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM-opt1 on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n * n * n / (1.0e6 * wt)); for (i = 0; i < n; i++) { iret = *(int *)(g + i) ^ *(int *)(c + i); assert(iret == 0); } } clock_gettime(CLOCK_REALTIME, rt + 0); gemm_accel_opt2(a, b, c, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - 
rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM-opt2 on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n * n * n / (1.0e6 * wt)); if (n <= 4096) for (i = 0; i < n; i++) { iret = *(int *)(g + i) ^ *(int *)(c + i); assert(iret == 0); } clock_gettime(CLOCK_REALTIME, rt + 0); gemm_cublas(a, b, c, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("CUBLAS on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n * n * n / (1.0e6 * wt)); if (n <= 4096) for (i = 0; i < n; i++) { iret = *(int *)(g + i) ^ *(int *)(c + i); assert(iret == 0); } free(a); free(b); free(c); free(g); return 0; }
DRB037-truedepseconddimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/

/* NOTE(review): this is a DataRaceBench "-yes" benchmark — the race described
 * above is the intended behavior of the file and must NOT be removed; race
 * detection tools are expected to report it. */

#include <stdlib.h>
#include <stdio.h>

double b[1000][1000];

int main(int argc, char* argv[])
{
  int i,j;
  int n=1000, m=1000;

  /* Initialization: fills each row with i*m+j.  The nested 'parallel for'
   * on the j loop creates nested parallelism inside the outer region. */
#pragma omp parallel for private(i ,j )
  for (i=0;i<n;i++)
#pragma omp parallel for private(j )
    for (j=1;j<m;j++)
      b[i][j]= i * m + j;

  /* Second nest: b[i][j] depends on b[i][j-1] written in the previous j
   * iteration (true dependence along j) — this is the flagged race pair
   * per the file's own header comment above. */
#pragma omp parallel for private(i ,j )
  for (i=0;i<n;i++)
    for (j=1;j<m;j++)
      b[i][j]=b[i][j-1];

  /* Sequential dump of the result (one value per line). */
  for (i=0;i<n;i++)
    for (j=1;j<m;j++)
      printf("%lf\n",b[i][j]);

  return 0;
}
main.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <float.h> #include <math.h> #ifdef _OPENMP # include <omp.h> #endif #include "main.h" #define min(a, b) ((a<b)?a:b) #define max(a, b) ((a>b)?a:b) void parse(int argc, char* argv[], struct user_parameters* params) { int i; for(i=1; i<argc; i++) { if(!strcmp(argv[i], "-c")) params->check = 1; else if(!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { printf("----------------------------------------------\n"); printf("- KaStORS -\n"); printf("- Kaapi Starpu OpenMP Runtime task Suite -\n"); printf("----------------------------------------------\n"); printf("-h, --help : Show help information\n"); printf("-c : Ask to check result\n"); printf("-i : Number of iterations\n"); printf("-n : Matrix size\n"); printf("-m : SubMatrix size\n"); printf("-t : Choose algorithm (leaving blank will run type task)\n(Options for type) 1 - task, 2 - task with depend\n"); #ifdef BSIZE printf("-b : Block size\n"); #endif #ifdef IBSIZE printf("-ib : Internal Block size\n"); #endif #ifdef CUTOFF_SIZE printf("-s : Cutoff (Size of the matrix)\n"); #endif #ifdef CUTOFF_DEPTH printf("-d : Cutoff (depth)\n"); #endif exit(EXIT_SUCCESS); } else if(!strcmp(argv[i], "-i")) { if (++i < argc) params->niter = atoi(argv[i]); else { fprintf(stderr, "-i requires a number\n"); exit(EXIT_FAILURE); } #ifdef TITER } else if(!strcmp(argv[i], "-r")) { if (++i < argc) params->titer = atoi(argv[i]); else { fprintf(stderr, "-r requires a number\n"); exit(EXIT_FAILURE); } #endif } else if(!strcmp(argv[i], "-n")) { if (++i < argc) params->matrix_size = atoi(argv[i]); else { fprintf(stderr, "-n requires a number\n"); exit(EXIT_FAILURE); } } else if(!strcmp(argv[i], "-m")) { if (++i < argc) params->submatrix_size = atoi(argv[i]); else { fprintf(stderr, "-m requires a number\n"); exit(EXIT_FAILURE); } #ifdef BSIZE } else if(!strcmp(argv[i], "-b")) { if (++i < argc) params->blocksize = atoi(argv[i]); else { fprintf(stderr, "-b requires a number\n"); 
exit(EXIT_FAILURE); } #endif #ifdef IBSIZE } else if(!strcmp(argv[i], "-ib")) { if (++i < argc) params->iblocksize = atoi(argv[i]); else { fprintf(stderr, "-ib requires a number\n"); exit(EXIT_FAILURE); } #endif #ifdef CUTOFF_SIZE } else if(!strcmp(argv[i], "-s")) { if (++i < argc) params->cutoff_size = atoi(argv[i]); else { fprintf(stderr, "-s requires a number\n"); exit(EXIT_FAILURE); } #endif #ifdef CUTOFF_DEPTH } else if(!strcmp(argv[i], "-d")) { if (++i < argc) params->cutoff_depth = atoi(argv[i]); else { fprintf(stderr, "-d requires a number\n"); exit(EXIT_FAILURE); } #endif } else if(!strcmp(argv[i], "-t")) { if (++i < argc) params->type = atoi(argv[i]); else { fprintf(stderr, "-t requires a number\n"); exit(EXIT_FAILURE); } } else fprintf(stderr, "Unknown parameter : %s\n", argv[i]); } } int comp (const void * elem1, const void * elem2) { double f = *((double*)elem1); double s = *((double*)elem2); if (f > s) return 1; if (f < s) return -1; return 0; } int main(int argc, char* argv[]) { int num_threads = 1; struct user_parameters params; memset(&params, 0, sizeof(params)); /* default value */ params.niter = 1; parse(argc, argv, &params); // get Number of thread if OpenMP is activated #ifdef _OPENMP #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); #endif // warmup // run(&params); double mean = 0.0; double meansqr = 0.0; double min_ = DBL_MAX; double max_ = -1; double* all_times = (double*)malloc(sizeof(double) * params.niter); for (int i=0; i<params.niter; ++i) { double cur_time = run(&params); all_times[i] = cur_time; mean += cur_time; min_ = min(min_, cur_time); max_ = max(max_, cur_time); meansqr += cur_time * cur_time; } mean /= params.niter; meansqr /= params.niter; double stddev = sqrt(meansqr - mean * mean); qsort(all_times, params.niter, sizeof(double), comp); double median = all_times[params.niter / 2]; free(all_times); printf("Program : %s\n", argv[0]); printf("Size : %d\n", params.matrix_size); printf("Submatrix size : 
%d\n", params.submatrix_size); #ifdef BSIZE printf("Blocksize : %d\n", params.blocksize); #endif #ifdef IBSIZE printf("Internal Blocksize : %d\n", params.iblocksize); #endif #ifdef TITER printf("Iteration time : %d\n", params.titer); #endif printf("Iterations : %d\n", params.niter); #ifdef CUTOFF_SIZE printf("Cutoff Size : %d\n", params.cutoff_size); #endif #ifdef CUTOFF_DEPTH printf("Cutoff depth : %d\n", params.cutoff_depth); #endif printf("Threads : %d\n", num_threads); #ifdef GFLOPS printf("Gflops:: "); #else printf("Time(sec):: "); #endif printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n", mean, stddev, min_, max_, median); if(params.check) printf("Check : %s\n", (params.succeed)? ((params.succeed > 1)?"not implemented":"success") :"fail"); if (params.string2display !=0) printf("%s", params.string2display); printf("\n"); /* Rodar aqui o codigo sequencial run_seq*/ printf("Running Sequential code\n"); struct user_parameters params_seq; memset(&params_seq, 0, sizeof(params_seq)); /* default value */ params_seq.niter = params.niter; params_seq.matrix_size = params.matrix_size; params_seq.submatrix_size = params.submatrix_size; params_seq.check = params.check; params_seq.type = 3; //strcpy(params_seq.string2display, params.string2display); params_seq.string2display = params.string2display; //parse(argc, argv, &params_seq); // warmup run(&params_seq); double mean_seq = 0.0; double meansqr_seq = 0.0; double min_seq = DBL_MAX; double max_seq = -1; double* all_times_seq = (double*)malloc(sizeof(double) * params_seq.niter); for (int i=0; i<params_seq.niter; ++i) { double cur_time = run(&params_seq); all_times_seq[i] = cur_time; mean_seq += cur_time; min_seq = min(min_, cur_time); max_seq = max(max_, cur_time); meansqr_seq += cur_time * cur_time; } mean_seq /= params_seq.niter; meansqr_seq /= params_seq.niter; double stddev_seq = sqrt(meansqr_seq - mean_seq * mean_seq); qsort(all_times_seq, params_seq.niter, sizeof(double), comp); double 
median_seq = all_times_seq[params_seq.niter / 2]; free(all_times_seq); printf("Sequential Stats\n"); printf("Program : %s\n", argv[0]); printf("Size : %d\n", params_seq.matrix_size); printf("Submatrix size : %d\n", params_seq.submatrix_size); printf("Iterations : %d\n", params_seq.niter); #ifdef GFLOPS printf("Gflops:: "); #else printf("Time(sec):: "); #endif printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n", mean_seq, stddev_seq, min_seq, max_seq, median_seq); if(params_seq.check) printf("Check : %s\n", (params_seq.succeed)? ((params_seq.succeed > 1)?"not implemented":"success") :"fail"); if (params_seq.string2display !=0) printf("%s", params_seq.string2display); printf("\n"); return 0; }
unpk_complex.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"

// 2009 public domain wesley ebisuzaki
//
// note: assumption that the grib file will use 25 bits or less for storing data
//   (limit of bitstream unpacking routines)
// note: assumption that all data can be stored as integers and have a value < INT_MAX
//

#define DEBUG

/*
 * unpk_complex - decode a GRIB2 field stored with complex packing
 *                (DRS template 5.2) or complex packing with spatial
 *                differencing (DRS template 5.3).
 *
 * sec   : array of pointers to the GRIB2 sections (sec[5] = DRS,
 *         sec[6] = bitmap, sec[7] = data section)
 * data  : output array, receives ndata floats (UNDEFINED where masked/missing)
 * ndata : number of grid points to produce
 *
 * returns 0 on success; calls fatal_error*() on unsupported/corrupt input.
 *
 * Decoding proceeds in stages: read group metadata (refs/widths/lengths)
 * from the packed bitstreams, unpack each group's data values, apply the
 * missing-value substitution rules, undo spatial differencing (pack==3),
 * and finally scale to float (optionally through the bitmap).
 */
int unpk_complex(unsigned char **sec, float *data, unsigned int ndata) {

    unsigned int i, j, n;
    int k, nbits, ref_group_length;
    unsigned char *p, *d, *mask_pointer;
    double ref_val, factor_10, factor_2, factor;
    float missing1, missing2;
    int n_sub_missing;
    int pack, offset;
    unsigned clocation;
    unsigned int ngroups, ref_group_width, nbit_group_width, len_last, npnts;
    int nbits_group_len, group_length_factor;
    int *group_refs, *group_widths, *group_lengths, *group_offset, *udata;
    unsigned int *group_clocation, *group_location;
    int m1, m2, mask, last, penultimate;
    int extra_vals[2];
    int min_val;
    int ctable_5_4, ctable_5_6, bitmap_flag, extra_octets;

    extra_vals[0] = extra_vals[1] = 0;
    pack = code_table_5_0(sec);
    /* only complex packing (2) and complex packing + spatial diff (3) */
    if (pack != 2 && pack != 3) return 0;

    /* decode scaling parameters from the Data Representation Section */
    p = sec[5];
    ref_val = ieee2flt(p+11);
    factor_2 = Int_Power(2.0, int2(p+15));
    factor_10 = Int_Power(10.0, -int2(p+17));
    ref_val *= factor_10;
    factor = factor_2 * factor_10;
    nbits = p[19];
    ngroups = uint4(p+31);
    bitmap_flag = code_table_6_0(sec);
    ctable_5_6 = code_table_5_6(sec);

    /* spatial differencing order must be 1st or 2nd */
    if (pack == 3 && (ctable_5_6 != 1 && ctable_5_6 != 2))
        fatal_error_i("unsupported: code table 5.6=%d", ctable_5_6);

    /* octets used for the extra descriptors of spatial differencing */
    extra_octets = (pack == 2) ? 0 : sec[5][48];

    /* special case: no groups => whole field is the reference value
       (subject to the bitmap, if one is present) */
    if (ngroups == 0) {
        if (bitmap_flag == 255) {
            for (i = 0; i < ndata; i++) data[i] = ref_val;
            return 0;
        }
        if (bitmap_flag == 0 || bitmap_flag == 254) {
            mask_pointer = sec[6] + 6;
            mask = 0;
            for (i = 0; i < ndata; i++) {
                /* refill the mask byte every 8 points */
                if ((i & 7) == 0) mask = *mask_pointer++;
                data[i] = (mask & 128) ? ref_val : UNDEFINED;
                mask <<= 1;
            }
            return 0;
        }
        fatal_error("unknown bitmap", "");
    }

    /* group-splitting metadata */
    ctable_5_4 = code_table_5_4(sec);
    ref_group_width = p[35];
    nbit_group_width = p[36];
    ref_group_length = uint4(p+37);
    group_length_factor = p[41];
    len_last = uint4(p+42);
    nbits_group_len = p[46];

#ifdef DEBUG
    fprintf(stderr,"ctable 5.4 %d ref_group_width %u nbit_group_width %u ref_group_length %u group_length_factor %d\n", ctable_5_4,
        ref_group_width, nbit_group_width, ref_group_length, group_length_factor);
    fprintf(stderr,"len_last %u nbit_group_len %u\n", len_last, nbits_group_len);
#endif

    npnts = GB2_Sec5_nval(sec); // number of defined points
    n_sub_missing = sub_missing_values(sec, &missing1, &missing2);

    // allocate group widths and group lengths
    group_refs = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_widths = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_lengths = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_location = (unsigned int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_clocation = (unsigned int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_offset = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    udata = (int *) malloc(sizeof (unsigned int) * (size_t) npnts);
    if (group_refs == NULL || group_widths == NULL || group_lengths ==
        NULL || group_location == NULL || group_clocation == NULL ||
        group_offset == NULL || udata == NULL)
        fatal_error("unpk_complex: memory allocation","");

    // read any extra values (spatial differencing descriptors)
    d = sec[7]+5;
    min_val = 0;
    if (extra_octets) {
        extra_vals[0] = uint_n(d,extra_octets);
        d += extra_octets;
        if (ctable_5_6 == 2) {
            extra_vals[1] = uint_n(d,extra_octets);
            d += extra_octets;
        }
        min_val = int_n(d,extra_octets);
        d += extra_octets;
    }

    if (ctable_5_4 != 1)
        fatal_error_i("internal decode does not support code table 5.4=%d",
            ctable_5_4);

    /* The three bitstreams (group refs, widths, lengths) are independent,
       so read them in parallel sections.  The later sections/single are
       separated by implicit barriers, so the cumulative counters
       (j, n, clocation, offset) are safely published between stages. */
#pragma omp parallel
{
    #pragma omp sections
    {

        #pragma omp section
        {
            // read the group reference values
            rd_bitstream(d, 0, group_refs, nbits, ngroups);
        }

        #pragma omp section
        {
            unsigned int i;
            // read the group widths
            rd_bitstream(d+(nbits*ngroups+7)/8,0,group_widths,nbit_group_width,ngroups);
            for (i = 0; i < ngroups; i++) group_widths[i] += ref_group_width;
        }

        #pragma omp section
        {
            unsigned int i;
            // read the group lengths
            if (ctable_5_4 == 1) {
                rd_bitstream(d+(nbits*ngroups+7)/8+(ngroups*nbit_group_width+7)/8,
                    0,group_lengths, nbits_group_len, ngroups-1);
                for (i = 0; i < ngroups-1; i++) {
                    group_lengths[i] = group_lengths[i] * group_length_factor + ref_group_length;
                }
                /* last group has an explicit length */
                group_lengths[ngroups-1] = len_last;
            }
        }

    }

    #pragma omp single
    {
        /* advance past the three metadata bitstreams */
        d += (nbits*ngroups + 7)/8 +
             (ngroups * nbit_group_width + 7) / 8 +
             (ngroups * nbits_group_len + 7) / 8;

        // do a check for number of grid points and size
        clocation = offset = n = j = 0;
    }

    #pragma omp sections
    {
        #pragma omp section
        {
            /* prefix sums: start index of each group in udata,
               plus total point count (j) and total bit count (n) */
            unsigned int i;
            for (i = 0; i < ngroups; i++) {
                group_location[i] = j;
                j += group_lengths[i];
                n += group_lengths[i]*group_widths[i];
            }
        }
        #pragma omp section
        {
            /* byte location of each group's packed data */
            unsigned int i;
            for (i = 0; i < ngroups; i++) {
                group_clocation[i] = clocation;
                clocation = clocation + group_lengths[i]*(group_widths[i]/8) +
                    (group_lengths[i]/8)*(group_widths[i] % 8);
            }
        }
        #pragma omp section
        {
            /* residual bit offset of each group */
            unsigned int i;
            for (i = 0; i < ngroups; i++) {
                group_offset[i] = offset;
                offset += (group_lengths[i] % 8)*(group_widths[i] % 8);
            }
        }
    }
}

    /* sanity checks: point count and section-7 size must match */
    if (j != npnts) fatal_error_u("bad complex packing: n points %u",j);
    if (d + (n+7)/8 - sec[7] != GB2_Sec7_size(sec))
        fatal_error("complex unpacking size mismatch old test","");

    if (d + clocation + (offset + 7)/8 - sec[7] != GB2_Sec7_size(sec))
        fatal_error("complex unpacking size mismatch","");

    /* unpack each group's values; groups are independent */
#pragma omp parallel for private(i) schedule(static)
    for (i = 0; i < ngroups; i++) {
        group_clocation[i] += (group_offset[i] / 8);
        group_offset[i] = (group_offset[i] % 8);

        rd_bitstream(d + group_clocation[i], group_offset[i],
            udata+group_location[i], group_widths[i], group_lengths[i]);
    }

    // handle substitute, missing values and reference value
    if (n_sub_missing == 0) {
        /* no missing values: just add the group reference */
#pragma omp parallel for private(i,k,j)
        for (i = 0; i < ngroups; i++) {
            j = group_location[i];
            for (k = 0; k < group_lengths[i]; k++) {
                udata[j++] += group_refs[i];
            }
        }
    }
    else if (n_sub_missing == 1) {
        /* one missing value: the all-ones code marks a missing point
           (INT_MAX is used internally as the missing sentinel) */
#pragma omp parallel for private(i,m1,k,j)
        for (i = 0; i < ngroups; i++) {
            j = group_location[i];

            if (group_widths[i] == 0) {
                /* zero-width group: ref value applies to the whole group */
                m1 = (1 << nbits) - 1;
                if (m1 == group_refs[i]) {
                    for (k = 0; k < group_lengths[i]; k++) udata[j++] = INT_MAX;
                }
                else {
                    for (k = 0; k < group_lengths[i]; k++) udata[j++] += group_refs[i];
                }
            }
            else {
                m1 = (1 << group_widths[i]) - 1;
                for (k = 0; k < group_lengths[i]; k++) {
                    if (udata[j] == m1) udata[j] = INT_MAX;
                    else udata[j] += group_refs[i];
                    j++;
                }
            }
        }
    }
    else if (n_sub_missing == 2) {
        /* two missing values: all-ones and all-ones-minus-one codes */
#pragma omp parallel for private(i,j,k,m1,m2)
        for (i = 0; i < ngroups; i++) {
            j = group_location[i];
            if (group_widths[i] == 0) {
                m1 = (1 << nbits) - 1;
                m2 = m1 - 1;
                if (m1 == group_refs[i] || m2 == group_refs[i]) {
                    for (k = 0; k < group_lengths[i]; k++) udata[j++] = INT_MAX;
                }
                else {
                    for (k = 0; k < group_lengths[i]; k++) udata[j++] += group_refs[i];
                }
            }
            else {
                m1 = (1 << group_widths[i]) - 1;
                m2 = m1 - 1;
                for (k = 0; k < group_lengths[i]; k++) {
                    if (udata[j] == m1 || udata[j] == m2) udata[j] = INT_MAX;
                    else udata[j] += group_refs[i];
                    j++;
                }
            }
        }
    }

    // post processing: undo spatial differencing (sequential by nature)

    if (pack == 3) {
        if (ctable_5_6 == 1) {
            /* 1st order differences: first defined point is given
               explicitly, the rest are cumulative sums */
            last = extra_vals[0];
            i = 0;
            while (i < npnts) {
                if (udata[i] == INT_MAX) i++;
                else {
                    udata[i++] = extra_vals[0];
                    break;
                }
            }
            while (i < npnts) {
                if (udata[i] == INT_MAX) i++;
                else {
                    udata[i] += last + min_val;
                    last = udata[i++];
                }
            }
        }
        else if (ctable_5_6 == 2) {
            /* 2nd order differences: first two defined points are given
               explicitly, then x[i] = d[i] + min_val + 2*last - penultimate */
            penultimate = extra_vals[0];
            last = extra_vals[1];

            i = 0;
            while (i < npnts) {
                if (udata[i] == INT_MAX) i++;
                else {
                    udata[i++] = extra_vals[0];
                    break;
                }
            }
            while (i < npnts) {
                if (udata[i] == INT_MAX) i++;
                else {
                    udata[i++] = extra_vals[1];
                    break;
                }
            }
            for (; i < npnts; i++) {
                if (udata[i] != INT_MAX) {
                    udata[i] = udata[i] + min_val + last + last - penultimate;
                    penultimate = last;
                    last = udata[i];
                }
            }
        }
        else fatal_error_i("Unsupported: code table 5.6=%d", ctable_5_6);
    }

    // convert to float

    if (bitmap_flag == 255) {
        /* no bitmap: one packed value per grid point */
#pragma omp parallel for schedule(static) private(i)
        for (i = 0; i < ndata; i++) {
            data[i] = (udata[i] == INT_MAX) ? UNDEFINED :
                ref_val + udata[i] * factor;
        }
    }
    else if (bitmap_flag == 0 || bitmap_flag == 254) {
        /* bitmap present: packed values only exist where the mask bit is set
           (sequential: n is a running index into udata) */
        n = 0;
        mask = 0;
        mask_pointer = sec[6] + 6;
        for (i = 0; i < ndata; i++) {
            if ((i & 7) == 0) mask = *mask_pointer++;
            if (mask & 128) {
                if (udata[n] == INT_MAX) data[i] = UNDEFINED;
                else data[i] = ref_val + udata[n] * factor;
                n++;
            }
            else data[i] = UNDEFINED;
            mask <<= 1;
        }
    }
    else fatal_error_i("unknown bitmap: %d", bitmap_flag);

    free(group_refs);
    free(group_widths);
    free(group_lengths);
    free(group_location);
    free(group_clocation);
    free(group_offset);
    free(udata);
    return 0;
}
JointWMF.h
/***************************************************************/ /* * Distribution code Version 1.1 -- 09/21/2014 by Qi Zhang Copyright 2014, The Chinese University of Hong Kong. * * The Code is created based on the method described in the following paper * [1] "100+ Times Faster Weighted Median Filter", Qi Zhang, Li Xu, Jiaya Jia, IEEE Conference on * Computer Vision and Pattern Recognition (CVPR), 2014 * * Due to the adaption for supporting mask and different types of input, this code is * slightly slower than the one claimed in the original paper. Please use * our executable on our website for performance comparison. * * The code and the algorithm are for non-comercial use only. * /***************************************************************/ #ifndef JOINT_WMF_H #define JOINT_WMF_H /***************************************************************/ /* * Standard IO library is required. * STL String library is required. * /***************************************************************/ #include <cstdio> #include <string> /***************************************************************/ /* * OpenCV 2.4 is required. * The following code is already built on OpenCV 2.4.2. * /***************************************************************/ #include "opencv2/core/core.hpp" #include <time.h> #include <omp.h> //Use the namespace of CV and STD using namespace std; using namespace cv; class JointWMF{ public: /***************************************************************/ /* Function: filter * * Description: filter implementation of joint-histogram weighted median framework * including clustering of feature image, adaptive quantization of input image. * * Input arguments: * I: input image (any # of channels). Accept only CV_32F and CV_8U type. * feature: the feature image ("F" in the paper). Accept only CV_8UC1 and CV_8UC3 type (the # of channels should be 1 or 3). * r: radius of filtering kernel, should be a positive integer. 
* sigma: filter range standard deviation for the feature image. * nI: # of quantization level of input image. (only when the input image is CV_32F type) * nF: # of clusters of feature value. (only when the feature image is 3-channel) * iter: # of filtering times/iterations. (without changing the feature map) * weightType: the type of weight definition, including: * exp: exp(-|I1-I2|^2/(2*sigma^2)) * iv1: (|I1-I2|+sigma)^-1 * iv2: (|I1-I2|^2+sigma^2)^-1 * cos: dot(I1,I2)/(|I1|*|I2|) * jac: (min(r1,r2)+min(g1,g2)+min(b1,b2))/(max(r1,r2)+max(g1,g2)+max(b1,b2)) * off: unweighted * mask: a 0-1 mask that has the same size with I. This mask is used to ignore the effect of some pixels. If the pixel value on mask is 0, * the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling. * * Note: * 1. When feature image clustering (when F is 3-channel) OR adaptive quantization (when I is floating point image) is * performed, the result is an approximation. To increase the accuracy, using a larger "nI" or "nF" will help. * */ /***************************************************************/ static Mat filter(Mat &I, Mat &feature, int r, float sigma=25.5, int nI=256, int nF=256, int iter=1, string weightType="exp", Mat mask=Mat()){ Mat F = feature.clone(); //check validation assert(I.depth() == CV_32F || I.depth() == CV_8U); assert(F.depth() == CV_8U && (F.channels()==1 || F.channels()==3)); //declaration Mat result; //Preprocess I //OUTPUT OF THIS STEP: Is, iMap //If I is floating point image, "adaptive quantization" is done in from32FTo32S. //The mapping of floating value to integer value is stored in iMap (for each channel). //"Is" stores each channel of "I". The channels are converted to CV_32S type after this step. 
vector<float *> iMap(I.channels()); vector<Mat> Is; { split(I,Is); for(int i=0;i<(int)Is.size();i++){ if(I.depth()==CV_32F){ iMap[i] = new float[nI]; from32FTo32S(Is[i],Is[i],nI,iMap[i]); } else if(I.depth()==CV_8U){ Is[i].convertTo(Is[i],CV_32S); } } } //Preprocess F //OUTPUT OF THIS STEP: F(new), wMap //If "F" is 3-channel image, "clustering feature image" is done in featureIndexing. //If "F" is 1-channel image, featureIndexing only does a type-casting on "F". //The output "F" is CV_32S type, containing indexes of feature values. //"wMap" is a 2D array that defines the distance between each pair of feature indexes. // wMap[i][j] is the weight between feature index "i" and "j". float **wMap; { featureIndexing(F, wMap, nF, sigma, weightType); } //Filtering - Joint-Histogram Framework { for(int i=0;i<(int)Is.size();i++){ for(int k=0;k<iter;k++){ {//Do filtering Is[i] = filterCore(Is[i], F, wMap, r, nF,nI,mask); } } } } float2D_release(wMap); //Postprocess F //Convert input image back to the original type. { for(int i=0;i<(int)Is.size();i++){ if(I.depth()==CV_32F){ from32STo32F(Is[i],Is[i],iMap[i]); delete []iMap[i]; } else if(I.depth()==CV_8U){ Is[i].convertTo(Is[i],CV_8U); } } } //merge the channels merge(Is,result); //end of the function return result; } /***************************************************************/ /* Function: filterCore * * Description: filter core implementation only containing joint-histogram weighted median framework * * input arguments: * I: input image. Only accept CV_32S type. * F: feature image. Only accept CV_32S type. * wMap: a 2D array that defines the distance between each pair of feature values. wMap[i][j] is the weight between feature value "i" and "j". * r: radius of filtering kernel, should be a positive integer. 
* nI: # of possible values in I, i.e., all values of I should in range [0, nI) * nF: # of possible values in F, i.e., all values of F should in range [0, nF) * mask: a 0-1 mask that has the same size with I, for ignoring the effect of some pixels, as introduced in function "filter" */ /***************************************************************/ static Mat filterCore(Mat &I, Mat &F, float **wMap, int r=20, int nF=256, int nI=256, Mat mask=Mat()){ // Check validation assert(I.depth() == CV_32S && I.channels()==1);//input image: 32SC1 assert(F.depth() == CV_32S && F.channels()==1);//feature image: 32SC1 // Configuration and declaration int rows = I.rows, cols = I.cols; int alls = rows * cols; int winSize = (2*r+1)*(2*r+1); Mat outImg = I.clone(); // Handle Mask if(mask.empty()){ mask = Mat(I.size(),CV_8U); mask = Scalar(1); } // Allocate memory for joint-histogram and BCB int **H = int2D(nI,nF); int *BCB = new int[nF]; // Allocate links for necklace table int **Hf = int2D(nI,nF);//forward link int **Hb = int2D(nI,nF);//backward link int *BCBf = new int[nF];//forward link int *BCBb = new int[nF];//backward link // Column Scanning for(int x=0;x<cols;x++){ // Reset histogram and BCB for each column memset(BCB, 0, sizeof(int)*nF); memset(H[0], 0, sizeof(int)*nF*nI); for(int i=0;i<nI;i++)Hf[i][0]=Hb[i][0]=0; BCBf[0]=BCBb[0]=0; // Reset cut-point int medianVal = -1; // Precompute "x" range and checks boundary int downX = max(0,x-r); int upX = min(cols-1,x+r); // Initialize joint-histogram and BCB for the first window { int upY = min(rows-1,r); for(int i=0;i<=upY;i++){ int *IPtr = I.ptr<int>(i); int *FPtr = F.ptr<int>(i); uchar *maskPtr = mask.ptr<uchar>(i); for(int j=downX;j<=upX;j++){ if(!maskPtr[j])continue; int fval = IPtr[j]; int *curHist = H[fval]; int gval = FPtr[j]; // Maintain necklace table of joint-histogram if(!curHist[gval] && gval){ int *curHf = Hf[fval]; int *curHb = Hb[fval]; int p1=0,p2=curHf[0]; curHf[p1]=gval; curHf[gval]=p2; curHb[p2]=gval; 
curHb[gval]=p1; } curHist[gval]++; // Maintain necklace table of BCB updateBCB(BCB[gval],BCBf,BCBb,gval,-1); } } } for(int y=0;y<rows;y++){ // Find weighted median with help of BCB and joint-histogram { float balanceWeight = 0; int curIndex = F.ptr<int>(y,x)[0]; float *fPtr = wMap[curIndex]; int &curMedianVal = medianVal; // Compute current balance int i=0; do{ balanceWeight += BCB[i]*fPtr[i]; i=BCBf[i]; }while(i); // Move cut-point to the left if(balanceWeight >= 0){ for(;balanceWeight >= 0 && curMedianVal;curMedianVal--){ float curWeight = 0; int *nextHist = H[curMedianVal]; int *nextHf = Hf[curMedianVal]; // Compute weight change by shift cut-point int i=0; do{ curWeight += (nextHist[i]<<1)*fPtr[i]; // Update BCB and maintain the necklace table of BCB updateBCB(BCB[i],BCBf,BCBb,i,-(nextHist[i]<<1)); i=nextHf[i]; }while(i); balanceWeight -= curWeight; } } // Move cut-point to the right else if(balanceWeight < 0){ for(;balanceWeight < 0 && curMedianVal != nI-1; curMedianVal++){ float curWeight = 0; int *nextHist = H[curMedianVal+1]; int *nextHf = Hf[curMedianVal+1]; // Compute weight change by shift cut-point int i=0; do{ curWeight += (nextHist[i]<<1)*fPtr[i]; // Update BCB and maintain the necklace table of BCB updateBCB(BCB[i],BCBf,BCBb,i,nextHist[i]<<1); i=nextHf[i]; }while(i); balanceWeight += curWeight; } } // Weighted median is found and written to the output image if(balanceWeight<0)outImg.ptr<int>(y,x)[0] = curMedianVal+1; else outImg.ptr<int>(y,x)[0] = curMedianVal; } // Update joint-histogram and BCB when local window is shifted. 
{ int fval,gval,*curHist; // Add entering pixels into joint-histogram and BCB { int rownum = y + r + 1; if(rownum < rows){ int *inputImgPtr = I.ptr<int>(rownum); int *guideImgPtr = F.ptr<int>(rownum); uchar *maskPtr = mask.ptr<uchar>(rownum); for(int j=downX;j<=upX;j++){ if(!maskPtr[j])continue; fval = inputImgPtr[j]; curHist = H[fval]; gval = guideImgPtr[j]; // Maintain necklace table of joint-histogram if(!curHist[gval] && gval){ int *curHf = Hf[fval]; int *curHb = Hb[fval]; int p1=0,p2=curHf[0]; curHf[gval]=p2; curHb[gval]=p1; curHf[p1]=curHb[p2]=gval; } curHist[gval]++; // Maintain necklace table of BCB updateBCB(BCB[gval],BCBf,BCBb,gval,((fval <= medianVal)<<1)-1); } } } // Delete leaving pixels into joint-histogram and BCB { int rownum = y - r; if(rownum >= 0){ int *inputImgPtr = I.ptr<int>(rownum); int *guideImgPtr = F.ptr<int>(rownum); uchar *maskPtr = mask.ptr<uchar>(rownum); for(int j=downX;j<=upX;j++){ if(!maskPtr[j])continue; fval = inputImgPtr[j]; curHist = H[fval]; gval = guideImgPtr[j]; curHist[gval]--; // Maintain necklace table of joint-histogram if(!curHist[gval] && gval){ int *curHf = Hf[fval]; int *curHb = Hb[fval]; int p1=curHb[gval],p2=curHf[gval]; curHf[p1]=p2; curHb[p2]=p1; } // Maintain necklace table of BCB updateBCB(BCB[gval],BCBf,BCBb,gval,-((fval <= medianVal)<<1)+1); } } } } } } // Deallocate the memory { delete []BCB; delete []BCBf; delete []BCBb; int2D_release(H); int2D_release(Hf); int2D_release(Hb); } // end of the function return outImg; } private: static float get_rt(){ struct timeval realtime; clock_gettime(0,&realtime); return (float)(realtime.tv_sec*1000000+realtime.tv_usec); } /***************************************************************/ /* Function: updateBCB * Description: maintain the necklace table of BCB /***************************************************************/ static inline void updateBCB(int &num,int *f,int *b,int i,int v){ static int p1,p2; if(i){ if(!num){ // cell is becoming non-empty p2=f[0]; f[0]=i; 
f[i]=p2; b[p2]=i; b[i]=0; } else if(!(num+v)){// cell is becoming empty p1=b[i],p2=f[i]; f[p1]=p2; b[p2]=p1; } } // update the cell count num += v; } /***************************************************************/ /* Function: float2D * Description: allocate a 2D float array with dimension "dim1 x dim2" /***************************************************************/ static float** float2D(int dim1, int dim2){ float **ret = new float*[dim1]; ret[0] = new float[dim1*dim2]; for(int i=1;i<dim1;i++)ret[i] = ret[i-1]+dim2; return ret; } /***************************************************************/ /* Function: float2D_release * Description: deallocate the 2D array created by float2D() /***************************************************************/ static void float2D_release(float **p){ delete []p[0]; delete []p; } /***************************************************************/ /* Function: int2D * Description: allocate a 2D integer array with dimension "dim1 x dim2" /***************************************************************/ static int** int2D(int dim1, int dim2){ int **ret = new int*[dim1]; ret[0] = new int[dim1*dim2]; for(int i=1;i<dim1;i++)ret[i] = ret[i-1]+dim2; return ret; } /***************************************************************/ /* Function: int2D_release * Description: deallocate the 2D array created by int2D() /***************************************************************/ static void int2D_release(int **p){ delete []p[0]; delete []p; } /***************************************************************/ /* Function: featureIndexing * Description: convert uchar feature image "F" to CV_32SC1 type. 
* If F is 3-channel, perform k-means clustering * If F is 1-channel, only perform type-casting /***************************************************************/ static void featureIndexing(Mat &F, float **&wMap, int &nF, float sigmaI, string weightType){ // Configuration and Declaration Mat FNew; int cols = F.cols, rows = F.rows; int alls = cols * rows; int KmeansAttempts=1; vector<string> ops; ops.push_back("exp"); ops.push_back("iv1"); ops.push_back("iv2"); ops.push_back("cos"); ops.push_back("jac"); ops.push_back("off"); // Get weight type number int numOfOps = (int)ops.size(); int op = 0; for(;op<numOfOps;op++)if(ops[op] == weightType)break; if(op>=numOfOps)op=0; /* For 1 channel feature image (uchar)*/ if(F.channels() == 1){ nF = 256; // Type-casting F.convertTo(FNew, CV_32S); // Computer weight map (weight between each pair of feature index) { wMap = float2D(nF,nF); float nSigmaI = sigmaI; float divider = (1.0f/(2*nSigmaI*nSigmaI)); for(int i=0;i<nF;i++){ for(int j=i;j<nF;j++){ float diff = fabs((float)(i-j)); if(op==0)wMap[i][j] = wMap[j][i] = exp(-(diff*diff)*divider); // EXP 2 else if(op==2)wMap[i][j] = wMap[j][i] = 1.0f / (diff*diff+nSigmaI*nSigmaI); // IV2 else if(op==1)wMap[i][j] = wMap[j][i] = 1.0f/(diff+nSigmaI);// IV1 else if(op==3)wMap[i][j] = wMap[j][i] = 1.0f; // COS else if(op==4)wMap[i][j] = wMap[j][i] = (float)(min(i,j)*1.0/max(i,j)); // Jacard else if(op==5)wMap[i][j] = wMap[j][i] = 1.0f; // Unweighted } } } } /* For 3 channel feature image (uchar)*/ else if(F.channels() == 3){ const int shift = 2; // 256(8-bit)->64(6-bit) const int LOW_NUM = 256>>shift; static int hash[LOW_NUM][LOW_NUM][LOW_NUM]={0}; memset(hash,0,sizeof(hash)); // throw pixels into a 2D histogram int candCnt = 0; { int lowR,lowG,lowB; uchar *FPtr = F.ptr<uchar>(); for(int i=0,i3=0;i<alls;i++,i3+=3){ lowB = FPtr[i3]>>shift; lowG = FPtr[i3+1]>>shift; lowR = FPtr[i3+2]>>shift; if(hash[lowB][lowG][lowR]==0){ candCnt++; hash[lowB][lowG][lowR]=1; } } } nF = min(nF, candCnt); Mat 
samples(candCnt,3,CV_32F); //prepare for K-means { int top=0; for(int i=0;i<LOW_NUM;i++)for(int j=0;j<LOW_NUM;j++)for(int k=0;k<LOW_NUM;k++){ if(hash[i][j][k]){ samples.ptr<float>(top)[0] = (float)i; samples.ptr<float>(top)[1] = (float)j; samples.ptr<float>(top)[2] = (float)k; top++; } } } //do K-means Mat labels; Mat centers; { kmeans(samples, nF, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 0, 10000), KmeansAttempts, KMEANS_PP_CENTERS, centers ); } //make connection (i,j,k) <-> index { int top = 0; for(int i=0;i<LOW_NUM;i++)for(int j=0;j<LOW_NUM;j++)for(int k=0;k<LOW_NUM;k++){ if(hash[i][j][k]){ hash[i][j][k] = labels.ptr<int>(top)[0]; top++; } } } // generate index map { FNew = Mat(F.size(),CV_32SC1); int lowR,lowG,lowB; uchar *FPtr = F.ptr<uchar>(); for(int i=0,i3=0;i<alls;i++,i3+=3){ lowB = FPtr[i3]>>shift; lowG = FPtr[i3+1]>>shift; lowR = FPtr[i3+2]>>shift; FNew.ptr<int>()[i] = hash[lowB][lowG][lowR]; } } // Computer weight map (weight between each pair of feature index) { wMap = float2D(nF,nF); float nSigmaI = sigmaI/256.0f*LOW_NUM; float divider = (1.0f/(2*nSigmaI*nSigmaI)); float *length = new float[nF]; for(int i=0;i<nF;i++){ float a0 = centers.ptr<float>(i)[0]; float a1 = centers.ptr<float>(i)[1]; float a2 = centers.ptr<float>(i)[2]; length[i] = sqrt(a0*a0+a1*a1+a2*a2); } for(int i=0;i<nF;i++){ for(int j=i;j<nF;j++){ float a0 = centers.ptr<float>(i)[0], b0 = centers.ptr<float>(j)[0]; float a1 = centers.ptr<float>(i)[1], b1 = centers.ptr<float>(j)[1]; float a2 = centers.ptr<float>(i)[2], b2 = centers.ptr<float>(j)[2]; float diff0 = a0-b0; float diff1 = a1-b1; float diff2 = a2-b2; if(op==0)wMap[i][j] = wMap[j][i] = exp(-(diff0*diff0+diff1*diff1+diff2*diff2)*divider); // EXP 2 else if(op==2)wMap[i][j] = wMap[j][i] = 1.0f / (diff0*diff0+diff1*diff1+diff2*diff2+nSigmaI*nSigmaI); // IV2 else if(op==1)wMap[i][j] = wMap[j][i] = 1.0f/(fabs(diff0)+fabs(diff1)+fabs(diff2)+nSigmaI);// IV1 else if(op==3)wMap[i][j] = wMap[j][i] = 
(a0*b0+a1*b1+a2*b2)/(length[i]*length[j]); // COS else if(op==4)wMap[i][j] = wMap[j][i] = (min(a0,b0)+min(a1,b1)+min(a2,b2))/(max(a0,b0)+max(a1,b1)+max(a2,b2)); // Jacard else if(op==5)wMap[i][j] = wMap[j][i] = 1.0f; // Unweighted } } delete []length; } } //end of the function F = FNew; } /***************************************************************/ /* Function: from32FTo32S * Description: adaptive quantization for changing a floating-point 1D image to integer image. * The adaptive quantization strategy is based on binary search, which searches an * upper bound of quantization error. * The function also return a mapping between quantized value (32F) and quantized index (32S). * The mapping is used to convert integer image back to floating-point image after filtering. /***************************************************************/ static void from32FTo32S(Mat &img, Mat &outImg, int nI, float *mapping){ int rows = img.rows, cols = img.cols; int alls = rows * cols; float *imgPtr = img.ptr<float>(); typedef pair<float,int> pairFI; pairFI *data = (pairFI *)malloc(alls*sizeof(pairFI)); // Sort all pixels of the image by ascending order of pixel value { #pragma omp parallel for for(int i=0;i<alls;i++){ data[i].second = i; data[i].first = imgPtr[i]; } sort(data,data+alls); } // Find lower bound and upper bound of the pixel values double maxVal,minVal; minMaxLoc(img,&minVal,&maxVal); float maxRange = (float)(maxVal - minVal); float th = 1e-5f; float l = 0, r = maxRange*2.0f/nI; // Perform binary search on error bound while(r-l > th){ float m = (r+l)*0.5f; bool suc = true; float base = (float)minVal; int cnt=0; for(int i=0;i<alls;i++){ if(data[i].first>base+m){ cnt++; base = data[i].first; if(cnt==nI){ suc = false; break; } } } if(suc)r=m; else l=m; } Mat retImg(img.size(),CV_32SC1); int *retImgPtr = retImg.ptr<int>(); // In the sorted list, divide pixel values into clusters according to the minimum error bound // Quantize each value to the median of its cluster // 
Also record the mapping of quantized value and quantized index. float base = (float)minVal; int baseI = 0; int cnt = 0; for(int i=0;i<=alls;i++){ if(i==alls || data[i].first>base+r){ mapping[cnt] = data[(baseI+i-1)>>1].first; //median if(i==alls)break; cnt++; base = data[i].first; baseI = i; } retImgPtr[data[i].second] = cnt; } free(data); //end of the function outImg = retImg; } /***************************************************************/ /* Function: from32STo32F * Description: convert the quantization index image back to the floating-point image accroding to the mapping /***************************************************************/ static void from32STo32F(Mat &img, Mat &outImg, float *mapping){ Mat retImg(img.size(),CV_32F); int rows = img.rows, cols = img.cols, alls = rows*cols; float *retImgPtr = retImg.ptr<float>(); int *imgPtr = img.ptr<int>(); // convert 32S index to 32F real value #pragma omp parallel for for(int i=0;i<alls;i++){ retImgPtr[i] = mapping[imgPtr[i]]; } // end of the function outImg = retImg; } }; #endif
sdd.c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <omp.h>

/*
 * Reads an NxN integer matrix A from stdin, checks (in parallel) whether it
 * is strictly diagonally dominant, and if so:
 *   - finds m, the maximum absolute diagonal element of A,
 *   - builds B with B[i][i] = m and B[i][j] = m - |A[i][j]| off the diagonal,
 *   - finds the minimum element of B and its position.
 *
 * Fixes relative to the previous version:
 *   - the min search compared B[i][j] against the shared "min" OUTSIDE the
 *     critical section, so two threads could both pass the test and the
 *     later one could overwrite a smaller minimum (race). Now each thread
 *     finds a private minimum and merges it once inside the critical section.
 *   - "min" was initialized to 9999 inside the parallel region by every
 *     thread (racy, and wrong if all elements of B exceed 9999); it is now
 *     INT_MAX, set once before the region.
 *   - "num_of_lines" was written unsynchronized by every thread and became 0
 *     (an invalid schedule chunk size) whenever t > N; plain schedule(static)
 *     gives the same row-block distribution safely.
 *   - allocations are checked and all memory is freed before exit.
 */
int main(int argc, char *argv[])
{
    int t;              //number of threads for the program
    int N;              //the main dimension for "A", "B" matrices
    int **A;
    int **B;
    int m = 0;          //max absolute diagonal element of "A"
    int min;            //min element of "B" with its row and column
    int min_row = 0;
    int min_column = 0;
    int flag = 1;       //1 if "A" is strictly diagonally dominant, 0 otherwise

    printf("Type the number of threads you'd like to use: ");
    scanf("%d", &t);
    omp_set_num_threads(t);

    printf("Type the main dimension (N) for your \"A\" input matrix: ");
    scanf("%d", &N);

    //allocating memory dynamically to both of the matrices we will work on
    A = (int **)malloc(N * sizeof(int *));
    B = (int **)malloc(N * sizeof(int *));
    if (A == NULL || B == NULL) {
        fprintf(stderr, "memory allocation failed\n");
        return 1;
    }
    for (int i = 0; i < N; i++) {
        A[i] = (int *)malloc(N * sizeof(int));
        B[i] = (int *)malloc(N * sizeof(int));
        if (A[i] == NULL || B[i] == NULL) {
            fprintf(stderr, "memory allocation failed\n");
            return 1;
        }
    }

    printf("\n");

    //asking the user to type the values of the "A" matrix
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("A[%d][%d]= ", i, j);
            scanf("%d", &A[i][j]);
        }
        printf("\n");
    }

    //checking "A" matrix if it's strictly diagonally dominant
    //(only 1->0 transitions happen, done with an atomic write)
    #pragma omp parallel for schedule(static) shared(A, N, flag)
    for (int i = 0; i < N; i++) {
        int row_sum = 0;    //sum of the row's off-diagonal absolute values
        for (int j = 0; j < N; j++) {
            if (i != j)
                row_sum += abs(A[i][j]);
        }
        if (abs(A[i][i]) <= row_sum) {
            #pragma omp atomic write
            flag = 0;
        }
    }

    if (!flag) {
        printf("\"A\" matrix is not strictly diagonally dominant\n");
    }
    else {
        //printing the "A" matrix
        printf("\n********************************************************\n");
        printf("********************************************************\n");
        printf("The \"A\" %dx%d matrix:\n", N, N);
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                printf("%d \t", A[i][j]);
            }
            printf("\n");
        }
        printf("\n");

        //finding the absolute maximum value of the diagonal elements in "A"
        //(only the diagonal needs to be visited)
        #pragma omp parallel for schedule(static) reduction(max:m)
        for (int i = 0; i < N; i++)
            if (abs(A[i][i]) > m)
                m = abs(A[i][i]);

        printf("Total (absolute) maximum diagonal element of \"A\" is %d\n", m);
        printf("********************************************************\n");

        //calculating the "B" matrix
        #pragma omp parallel for schedule(static) collapse(2) shared(A, B, N, m)
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++)
                B[i][j] = (i == j) ? m : m - abs(A[i][j]);

        //printing the "B" matrix
        printf("The \"B\" %dx%d matrix:\n", N, N);
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                printf("%d \t", B[i][j]);
            }
            printf("\n");
        }
        printf("\n");

        //searching for the smallest value and its element's indexes:
        //each thread scans its share with a private minimum, then the
        //private results are merged once under the critical section
        min = INT_MAX;
        #pragma omp parallel shared(B, N, min, min_row, min_column)
        {
            int local_min = INT_MAX;
            int local_row = 0;
            int local_column = 0;

            #pragma omp for schedule(static) collapse(2)
            for (int i = 0; i < N; i++)
                for (int j = 0; j < N; j++)
                    if (B[i][j] < local_min) {
                        local_min = B[i][j];
                        local_row = i;
                        local_column = j;
                    }

            #pragma omp critical (inc_min)
            {
                if (local_min < min) {
                    min = local_min;
                    min_row = local_row;
                    min_column = local_column;
                }
            }
        }

        printf("Minimum element of \"B\" is %d at %dx%d\n", min, min_row, min_column);
        printf("********************************************************\n");
        printf("********************************************************\n");
    }

    //releasing the dynamically allocated memory
    for (int i = 0; i < N; i++) {
        free(A[i]);
        free(B[i]);
    }
    free(A);
    free(B);

    return(0);
}
declare-simd-3.c
#pragma omp declare simd linear(p:1) linear(val(q):-1) linear(s:-3) int f1 (int *p, int *q, short *s) { return *p + *q + *s; } /* { dg-final { scan-assembler-times "_ZGVbM4l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVbN4l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVcM4l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVcN4l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVdM8l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVdN8l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVeM16l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVeN16l4ln4ln6_f1:" 1 { target { i?86-*-* x86_64-*-* } } } } */ #pragma omp declare simd linear(p:s) linear(q:t) uniform (s) linear(r:s) notinbranch simdlen(8) uniform(t) int f2 (int *p, short *q, int s, int r, int t) { return *p + *q + r; } /* { dg-final { scan-assembler-times "_ZGVbN8ls2ls4uls2u_f2:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVcN8ls2ls4uls2u_f2:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVdN8ls2ls4uls2u_f2:" 1 { target { i?86-*-* x86_64-*-* } } } } */ /* { dg-final { scan-assembler-times "_ZGVeN8ls2ls4uls2u_f2:" 1 { target { i?86-*-* x86_64-*-* } } } } */
owl_aeos_tuner_map_impl.h
/*
 * OWL - OCaml Scientific and Engineering Computing
 * Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
 */

/* Template for element-wise map stubs over OCaml bigarrays.  The including
 * file is expected to #define the element types (NUMBER/NUMBER1/NUMBER2),
 * the per-element operation (MAPFN) and the stub names before inclusion;
 * everything is #undef'd at the bottom so the template can be included
 * repeatedly for different type/operation combinations. */

#ifdef FUN4

/* Sequential unary map: Y[i] = MAPFN(X[i]) for i in [0, N).
 * The OCaml runtime lock is released around the loop so other OCaml
 * threads can run while the (potentially long) map executes. */
CAMLprim value BASE_FUN4(value vN, value vX, value vY)
{
  CAMLparam3(vN, vX, vY);
  int N = Long_val(vN);

  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;

  while (start_x != stop_x) {
    NUMBER x = *start_x;
    *start_y = MAPFN(x);
    start_x += 1;
    start_y += 1;
  };

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

/* OpenMP unary map: same contract as BASE_FUN4, iteration space split
 * statically across threads.
 * NOTE(review): stop_x is computed but never used in this variant. */
CAMLprim value OMP_FUN4(value vN, value vX, value vY)
{
  CAMLparam3(vN, vX, vY);
  int N = Long_val(vN);

  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;

  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++) {
    NUMBER x = *(start_x + i);
    *(start_y + i) = (MAPFN(x));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

#endif /* FUN4 */

/* NOTE(review): this section is guarded by BASE_FUN15 while the closing
 * comment and the #undef list refer to FUN15 — confirm the intended guard
 * symbol against the sibling owl map templates. */
#ifdef BASE_FUN15

/* Sequential ternary map: MAPFN is applied to the element *addresses*
 * (&X[i], &Y[i], &Z[i]) for i in [0, N), so MAPFN may read and/or write
 * any of the three arrays.
 * NOTE(review): stop_x is computed but never used in this variant. */
CAMLprim value BASE_FUN15(value vN, value vX, value vY, value vZ)
{
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);

  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;

  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;
  start_z = Z_data;

  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

/* OpenMP ternary map: same contract as BASE_FUN15, iteration space split
 * statically across threads. */
CAMLprim value OMP_FUN15(value vN, value vX, value vY, value vZ)
{
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);

  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;

  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;
  start_z = Z_data;

  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

#endif /* FUN15 */

/* Reset all template parameters for the next inclusion. */
#undef NUMBER
#undef NUMBER1
#undef NUMBER2
#undef MAPFN
#undef FUN4
#undef FUN15
#undef OMP_FUN4
#undef OMP_FUN15
#undef BASE_FUN4
#undef BASE_FUN15
opencl_gpg_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for GPG format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Converted to use 'common' code, Feb29-Mar1 2016, JimF. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_gpg; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_gpg); #else #include <stdint.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "misc.h" #include "common-opencl.h" #include "options.h" #include "gpg_common.h" #include "twofish.h" #define FORMAT_LABEL "gpg-opencl" #define FORMAT_NAME "OpenPGP / GnuPG Secret Key" #define ALGORITHM_NAME "SHA1/SHA2 OpenCL" #define SALT_SIZE sizeof(struct gpg_common_custom_salt*) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } gpg_password; typedef struct { uint8_t v[32]; } gpg_hash; typedef struct { uint32_t length; uint32_t count; uint32_t key_len; uint8_t salt[SALT_LENGTH]; } gpg_salt; struct fmt_tests gpg_tests[] = { // from GPU /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*667*2048*387de4c9e2c1018aed84af75922ecaa92d1bc68d48042144c77dfe168de1fd654e4db77bfbc60ec68f283483382413cbfddddcfad714922b2d558f8729f705fbf973ab1839e756c26207a4bc8796eeb567bf9817f73a2a81728d3e4bc0894f62ad96e04e60752d84ebc01316703b0fd0f618f6120289373347027924606712610c583b25be57c8a130bc4dd796964f3f03188baa057d6b8b1fd36675af94d45847eeefe7fff63b755a32e8abe26b7f3f58bb091e5c7b9250afe2180b3d0abdd2c1db3d4fffe25e17d5b7d5b79367d98c523a6c280aafef5c1975a42fd97242ba86ced73c5e1a9bcab82adadd11ef2b64c3aad23bc930e62fc8def6b1d362e954795d87fa789e5bc2807bfdc69bba7e66065e3e3c2df0c25eab0fde39fbe54f32b26f07d88f8b05202e55874a1fa37d540a5af541e28370f27fe094ca8758cd7ff7b28df1cbc475713d7604b1af22fd758ebb3a83876ed83f003285bc8fdc7a5470f7c5a9e8a93929941692a9ff9f1bc146dcc02aab47e2679297d894f28b62da16c8baa95cd393d838fa63efc9d3f88de93dc970c67022d5dc88dce25decec8848f8e6f263d7c2c0238d36aa0013d7edefd43dac1299a54eb460d9b82cb53cf86fcb7c8d5dba95795a1adeb729a705b47b8317594ac3906424b2c0e425343eca019e53d927e6bc32688bd9e87ee808fb1d8eeee8ab938855131b839776c7da79a33a6d66e57eadb430ef04809009794e32a03a7e030b8792be5d53ceaf480ffd98633d1993c43f536a90bdbec8b9a827d0e0a49155450389beb53af5c214c4ec09712d83b175671358d8e9d54da7a8187f72aaaca5203372841af9b89a07b8aadecafc0f2901b8aec13a5382c6f94712d629333b301afdf52bdfa62534de2b10078cd4d0e781c88efdfe4e5252e39a236af449d4d62081cee630ab*3*254*2*3*8*b1fdf3772bb57e1f*65536*2127ccd55e721ba0", "polished"}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*668*2048*e5f3ef815854f90dfdc3ad61c9c92e512a53d7203b8a5665a8b00ac5ed92340a6ed74855b976fc451588cc5d51776b71657830f2c311859022a25412ee6746622febff8184824454c15a50d64c18b097af28d3939f5c5aa9589060f25923b8f7247e5a2130fb8241b8cc07a33f70391de7f54d84703d2537b4d1c307bdf824c6be24c6e36501e1754cc552551174ed51a2f958d17c6a5bd3b4f75d7979537ee1d5dcd974876afb93f2bcda7468a589d8dba9b36afbe019c9086d257f3f047e3ff896e52783f13219989307bf277e04a5d949113fc4efcc747334f307a448b949ee61b1db326892a9198789f9253994a0412dd704d9e083000b63fa07096d9d547e3235f7577ecd49199c9c3edfa3e43f65d6c506363d23c21561707f790e17ea25b7a7fce863b3c952218a3ac649002143c9b02df5c47ed033b9a1462d515580b10ac79ebdca61babb020400115f1e9fad26318a32294034ea4cbaf681c7b1de12c4ddb99dd4e39e6c8f13a322826dda4bb0ad22981b17f9e0c4d50d7203e205fb2ee6ded117a87e47b58f58f442635837f2debc6fcfbaebba09cff8b2e855d48d9b96c9a9fb020f66c97dffe53bba316ef756c797557f2334331eecaedf1ab331747dc0af6e9e1e4c8e2ef9ed2889a5facf72f1c43a24a6591b2ef5128ee872d299d32f8c0f1edf2bcc35f453ce27c534862ba2c9f60b65b641e5487f5be53783d79e8c1e5f62fe336d8854a8121946ea14c49e26ff2b2db36cef81390da7b7a8d31f7e131dccc32e6828a32b13f7a56a28d0a28afa8705adbf60cb195b602dd8161d8b6d8feff12b16eb1ac463eaa6ae0fd9c2d906d43d36543ef33659a04cf4e69e99b8455d666139e8860879d7e933e6c5d995dd13e6aaa492b21325f23cbadb1bc0884093ac43651829a6fe5fe4c138aff867eac253569d0dc6*3*254*2*3*8*e318a03635a19291*65536*06af8a67764f5674", "blingbling"}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*668*2048*8487ca407790457c30467936e109d968bdff7fa4f4d87b0af92e2384627ca546f2898e5f77c00300db87a3388476e2de74f058b8743c2d59ada316bc81c79fdd31e403e46390e3e614f81187fb0ae4ca26ed53a0822ace48026aa8a8f0abdf17d17d72dfa1eba7a763bbd72f1a1a8c020d02d7189bd95b12368155697f5e4e013f7c81f671ca320e72b61def43d3e2cb3d23d105b19fe161f2789a3c81363639b4258c855c5acd1dd6596c46593b2bfec23d319b58d4514196b2e41980fbb05f376a098049f3258f9cdf1628c6ff780963e2c8dc26728d33c6733fbac6e415bd16d924a087269e8351dd1c6129d1ac7925f19d7c9a9ed3b08a53e207ffbfba1d43891da68e39749775b38cbe9e6831def4b4297ce7446d09944583367f58205a4f986d5a84c8cf3871a7e2b6c4e2c94ff1df51cd94aecf7a76cd6991a785c66c78f686e6c47add9e27a6b00a2e709f1383f131e3b83b05c812b2ec76e732d713b780c381b0785f136cd00de7afa0276c95c5f0bb3a4b6ad484d56e390c11f9d975729ae1665189190fd131f49109f899735fd2c2efbafd8b971b196d18aeff70decc9768381f0b2243a05db99bd5911d5b94770ee315e1fe3ab0e090aa460d2c8d06a06fef254fd5fa8967386f1f5d37ea6f667215965eefe3fc6bc131f2883c02925a2a4f05dabc48f05867e68bb68741b6fb3193b7c51b7d053f6fd45108e496b9f8f2810fa75ffe454209e2249f06cc1bfc838a97436ebd64001b9619513bcb519132ce39435ed0d7c84ec0c6013e786eef5f9e23738debc70a68a389040e8caad6bd5bb486e43395e570f8780d3f1d837d2dc2657bbded89f76b06c28c5a58ecaa25a225d3d4513ee8dc8655907905590737b971035f690ac145b2d4322ecc86831f36b39d1490064b2aa27b23084a3a0b029e49a52b6a608219*3*254*2*3*8*0409f810febe5e05*65536*ce0e64511258eecc", "njokuani."}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*348*1024*e5fbff62d94b41de7fc9f3dd93685aa6e03a2c0fcd75282b25892c74922ec66c7327933087304d34d1f5c0acca5659b704b34a67b0d8dedcb53a10aee14c2615527696705d3ab826d53af457b346206c96ef4980847d02129677c5e21045abe1a57be8c0bf7495b2040d7db0169c70f59994bba4c9a13451d38b14bd13d8fe190cdc693ee207d8adfd8f51023b7502c7c8df5a3c46275acad6314d4d528df37896f7b9e53adf641fe444e18674d59cf46d5a6dffdc2f05e077346bf42fe35937e95f644a58a2370012d993c5008e6d6ff0c66c6d0d0b2f1c22961b6d12563a117897675f6b317bc71e4f2dbf6b9fff23186da2724a584d70401136e8c500784df462ea6548db4eecc782e79afe52fd8c1106c7841c085b8d44465d7a1910161d6c707a377a72f85c39fcb4ee58e6b2f617b6c4b173a52f171854f0e1927fa9fcd9d5799e16d840f06234698cfc333f0ad42129e618c2b9c5b29b17b7*3*254*2*3*8*7353cf09958435f9*9961472*efadea6cd5f3e5a7", "openwall"}, /* SHA1-CAST5 salt-iter */ {"$gpg$*1*668*2048*97b296b60904f6d505344b5b0aa277b0f40de05788a39cd9c39b14a56b607bd5db65e8da6111149a1725d06a4b52bdddf0e467e26fe13f72aa5570a0ea591eec2e24d3e9dd7534f26ec9198c8056ea1c03a88161fec88afd43474d31bf89756860c2bc6a6bc9e2a4a2fc6fef30f8cd2f74da6c301ccd5863f3240d1a2db7cbaa2df3a8efe0950f6200cbc10556393583a6ebb2e041095fc62ae3a9e4a0c5c830d73faa72aa8167b7b714ab85d927382d77bbfffb3f7c8184711e81cf9ec2ca03906e151750181500238f7814d2242721b2307baa9ea66e39b10a4fdad30ee6bff50d79ceac604618e74469ae3c80e7711c16fc85233a9eac39941a564b38513c1591502cde7cbd47a4d02a5d7d5ceceb7ff920ee40c29383bd7779be1e00b60354dd86ca514aa30e8f1523efcffdac1292198fe96983cb989a259a4aa475ed9b4ce34ae2282b3ba0169b2e82f9dee476eff215db33632cdcc72a65ba2e68d8e3f1fed90aaa68c4c886927b733144fb7225f1208cd6a108e675cc0cb11393db7451d883abb6adc58699393b8b7b7e19c8584b6fc95720ced39eabaa1124f423cc70f38385c4e9c4b4eeb39e73e891da01299c0e6ce1e97e1750a5c615e28f486c6a0e4da52c15285e7cf26ac859f5f4190e2804ad81ba4f8403e6358fbf1d48c7d593c3bac20a403010926877db3b9d7d0aaacd713a2b9833aff88d1e6b4d228532a66fe68449ad0d706ca7563fe8c2ec77062cc33244a515f2023701c052f0dd172b7914d497fdaefabd91a199d6cb2b62c71472f52c65d6a67d97d7713d39e91f347d2bc73b421fb
5c6c6ba028555e5a92a535aabf7a4234d6ea8a315d8e6dcc82087cc76ec8a7b2366cecf176647538968e804541b79a1b602156970d1b943eb2641f2b123e45d7cace9f2dc84b704938fa8c7579a859ef87eca46*3*254*2*3*8*d911a3f73b050340*2097152*347e15bee29eb77d", "password"}, /* SHA1-CAST5 salt-iter, DSA key */ {"$gpg$*17*42*1024*d974ae70cfbf8ab058b2e1d898add67ab1272535e8c4b9c5bd671adce22d08d5db941a60e0715b4f0c9d*3*254*2*3*8*a9e85673bb9199d8*11534336*71e35b85cddfe2af", "crackme"}, /* gpg --gen-key --s2k-digest-algo SHA256 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*92f639f5a56692a0fb3bd32ca5d91099b49d4cf283da7d272ed51bdf337a4960e361eeb302d418c3f9620d94a077bcf888b56f892d87e2f330ecab3934ebc080ac440b4bb7cd1f79565f0a8b7331c2302d725451fbeff51ff2f25e69708555edfb353dfcab9ce33f6071ccaa2d32ad93a73082be621a8ec43a66f984551607d1e366892386e2f3cc0bdf6447216d0fbc8402c86d54cf0fd8fc133c4899a5a4b1b36cedfb5b11e804856885a7230def7718684f99f995df24f985706f0c1311d15d9a043b6a0096f5e0bb751c61a07517372441887de0532b35d5e4f9d5b35b2119715ca51a4a59227a3031fbd24f25d06ae8b6d17c1b5998aba281733cc6260930916c0d4fb84bf0cf4e7112b07bf5d78a97716599be4bed78d741757ea7149db2d1c9ff35d3b69f80dd7152ed99642b695c88c0f075ffd8a360f30a3e6160d2c5b99e41418f47ac6f9615c1a4d73b0f05c8d11d8ea18b9ea6bf9e6d2a7642f253b7ee742389a9dc19bb81261061b578609b73ad314e6e5c6afe68640abc62f5009e659fa64790689f7befe5009e396cc63d79493e56371a080c0c94c8f0036dbe9ac5a8861befc5882168f7866ec225641a2cf91d8318fcf660699d1e0272b4e0df7751c84e48513a5d26c27a12bf7f9e6965321a97f0b8162f4861fea9c78ee4bc3110b2d412f38081781f0aba5a43b92af148c4e3d9affa1f6b3a42cfcf7c7275b95445777ae51ed200bdb30606432ff05d132232ee9e8a92eba811b96422ba3390f3dbe23f8d6c5ed5cbee361f980e58394c0a8d0f9e9e1186dbb5defcf5bf3c9b44f55598a0b119b71a8bd8edf6428555e36e76785954997f40409beeea578740fb77334c4a396bfac3a24f8628212737ff6d7ffa3802e7bacd06e3e81344eebd1e60a72efa5f45e09151f55d838fda78007190c040851e5f67*3*254*8*7*16*1d1d7a3090537117d6d18e3b8dc41433*65536*d5285754134a9a05", "12345678"}, /* gpg --gen-key --s2k-digest-algo SHA256 
--s2k-cipher-algo CAMELLIA128 */ {"$gpg$*1*668*2048*cce4298ada379aa74719ec266478e8711d7aa880ac552a15577ecb35c5d2f48a78d2b2fa1b015764c747b4632a7fe53675f2e117c64c0c4312e182c45ddcf988ed402de11ee93294e465070e052d198313bb822e88f31bcb1e3206271d8a5833d035effdce53648167663790e502574d1c5cf51fad8ae968bb155b75b22306f65cc37b27e0d6ba9b8e39b567c4853b41b21b9556b21a96f7f20477784118614b821e47d80ebc168d8b763e2bddfc37b7c55af838c9cff3be0e18da6da8f3671ab3c990fe541aedbb2ea8b43060f8cba4651baa4b8c498740064b95511c1e55d2125f99d636aec077ea0a606c1e9d9c919f0ed7f54a877907b45c544e534a843d8fded7334e66b74acc0a67b7ad6ffc317e93215e63ad083515d2394841ba52476096537cf0c436016031698d1497c7983e37fcd8ce4f184b6daa31cb5a2d7631355fc561bf681e309f6474163278ba8fd25e3dcc28342cc3b5c288d3cc95bc1c0746cc082b78f43cf3161d9c6551d56fbf23d83a8e10ae9380f754a2c0b74b93359d1b16213bb81625f301493ba6b347a1e5fb79745f7c8e317814e0e861f4fdb85f988f48ead7012f8e13a58fa07e33761efe64cb39b4bcf1f19d1f8b14f5bfc46c7703922273582bd99c266492247b2281c2565c03fe5270f0e858036ea4c994d4afd2029cc184a877189817dce9b5da2c8f89ea8914a0cc29dc4786aef6638e1983467ff574d2a4cc704bef7a7070c3b2bbb2f23e7c0fd8cf00365decae26a2d8ab45093587b3f8c3224bf7b8dd6c4a43853ef5c9c6eb6df0f2a77b126f55b49f77de5dc382a8327ed6fa24f379a4e9d1296cb0a9066b902f510aca6560f9e50bdd9663a269cdba41dd212dac569845c13226f2cd5311527705b24d698cb0acfb44b8a60bb4d3113ef2cb2cc7d597a889612c7f73aca5f8fd70a7*3*254*8*11*16*65a45645f3abe401f3345713d8eadfdf*65536*48e94f48bcda5a55", "abc"}, /* gpg --gen-key --s2k-digest-algo SHA256 --s2k-cipher-algo AES256 */ 
{"$gpg$*1*668*2048*4cb57f4b39dc6fc9137f99c9f4799e7a7b3dfa40fe6890e18b7b32c866aa8c63aa35ee1b3d2b5a223120a145fd066d082674552c6c89169c6f9a06efb238ba69c7d8b7826501bdbf6b92dfd7c97f5b9388a2afa6a8f985dbc8c962c56ed92a9f6dca3566e98647df5d31fec885608623e830fcf3346177a0e572dfe23610ae90c323bbb4cc54d857b7ea7642477c490a2fc875f3f7cc7889367f7ba3161df2a6c48218a06468146deeb66fc2d754420b3a967f418696eec725ad7d3093dc17924a2770949dd68f8efa79ddfdccbc7c23091fa7342a72b02f8288a14e7b9c51653a7d4f6044456b72a46033e3eb1855708c3bd310e10fb0f460ac362008d08526cb255e8a3efea5f6741a314b71d5fb811e42d1b3be79e546fcd52bc4d18ce3dcbe6c0b1816c25047bc8d81cbf21b57ba6bb12ab363fb17dd51176a5231e15b2740a66aff37d5b74547fc2af2448e6e83cf2ecbc7f897724e3d802becabdcf9ff2b2d977e45ff170899b1c3714a293b783ef758152c3072ad20a8b36b661c0af40c24e277dcefb3a869cce9a1e7f3afbd0abdbcbf87c309d2cb3fe36bd0069dd60da6651dc6e557d486953ef98699bee86b82baaa412f41c5952b3bec9ab43329f895a76dfd3e0e46bcd10277b1f57dfe43375a330c5c6e953c890c9e075f24fc1a9bdc38ea2ecaf0a4bc58026a545eacc317aee3eeebb39725b3ea6e1171ad600576b36e3d592909b73a4a3841c97a38db51f2579cd93d23560b9486e6a2d4d0a966efb31225c79d3214ed9da5b31b235b26f98a2b2f62f01684cf959056e978fd4ede44f4feaa35a8d411010a0a6df89a5d41eef39d64edea9c6dd79aa3ce9fdb4b41e88389776aafaedb3372e26633f13a63c4a62d2546e9b0c1e0d542991a2f8e9d76a630a20707d42073374308a409fe2a05b1476de07bb25679*3*254*8*9*16*ccdac5fce9ae3ec503390424a918aedb*65536*7dfbd9389fd9de2c", "openwall"}, /* SHA256-AES256 salt-iter */ 
{"$gpg$*1*348*1024*8f58917c41a894a4a3cdc138161c111312e404a1a27bb19f3234656c805ca9374bbfce59750688c6d84ba2387a4cd48330f504812cf074eba9c4da11d057d0a2662e3c7d9969e1256e40a56cce925fba29f4975ddb8619004def3502a0e7cf2ec818695c158243f21da34440eea1fec20418c7cf7dbe2230279ba9858201c00ae1d412aea1f59a66538fb739a15297ff9de39860e2782bf60a3979a5577a448925a0bc2d24c6bf3d09500046487a60bf5945e1a37b73a93eebb15cfd08c5279a942e150affbbe0d3084ab8aef2b6d9f16dc37b84334d91b80cf6f7b6c2e82d3c2be42afd39827dac16b4581be2d2f01d9703f2b19c16e149414fdfdcf8794aa90804e4b1dac8725bd0b0be52513848973eeadd5ec06d8a1da8ac072800fcc9c579172b58d39db5bc00bc0d7a21cf85fb6c7ce10f64edde425074cb9d1b4b078790aed2a46e79dc7fa4b8f3751111a2ff06f083a20c7d73d6bbc747e0*3*254*8*9*16*5b68d216aa46f2c1ed0f01234ebb6e06*131072*6c18b4661b884405", "openwall"}, /* gpg --gen-key --s2k-digest-algo SHA512 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*1de86a75cca20667506b71e729cf77e10ec148a948a94199910506e783eba52bf074f5d1d1f4819adbe28c7b51b464069ba3e44fceb62eef3038d3dfe8f7bc6012c9abc35769439730a8aabe99e4603fd2201303e82b413617d8fbaf95fdaee3d16d38a74df86a814f487e78b5c84093187529ebc54232a945628205b2eaf13ffeb41f94b1482a73f3aeb97f297d2398d94be2782a1f24244430cf251553dce8571c99ccbd6fe46e6863b25fe132420d1f49acdf9bf413c2155a794b5cf45cea8bc4d958fee20b5523cc42343a106fca60068f93aedd6d4f6021bee5a22b70969c1c8369a615de3f46867bc9364d0cdde141672c102ae42cb338c21d0ec6dd4eec923345201b3b3f97e94b7f60defb2a733616cdcd50c4254689441ab25d3ffe8adb56ef6654f35b446f05a56eef24a4bcdd52cc2b4590667f56d31c6182a757ad0ca1d1377cb04ac3a0711b25cb978ce51f19b5affe648153fa96ee3204b4043478ea20903aa7ff7f2f71cfcff802de73d709776d2dcf611d2936366c7a42edd7ab12ce4cf354eef5c27118ee89f3bb6f9de37b8e64e6db3071ea0b6de83ed27568e25672b56eacad2fee9a8872ea17b6a5fef7e14c3fece236842d2cef0c2044dbdcb2a3b317f64aaad1703844e0ebe1a5e0a90f137b62735d65dc51cf0357abe7ffd25d41d0e23fa9fc03b1be7b73f6fb8a9db04aed18cec473b0c93ffd981cc54cfd779e116c46ee621f3aa4b2e16a8ab8017a234cf26ac77f433e4544bd5761c8b263ae1b8023f6d1aca
73bae1d2da5bbf7824406f5e2ff976fbf6e4b9484f020e9346648435d341de2e06a9e607059f847c5007a078dec2f08f466fc219ea5c4762d678af9b4737466e6af46edab495305a4d30124cc30d67fd3d38787cf763e362fe4484722e22b5f584e7cf64ec2c05390252a23583da9ca*3*254*10*7*16*5dfa8dd3acc0c05f3b1666f0e9243ef9*65536*75807ba0c8e4917f", "12345678"}, /* gpg --gen-key --s2k-digest-algo SHA512 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*fc59c117c35e67a90ee4fabc451a0a61d719d9f913cd796a3d1cc5dd788a9df62bff604ca19a3ee23ea068e3d0747d1897a5ceee21646799f4734ec4a2d02574255f6eace9674e368c2b4588b8892541ab53907795e25b9afd849d9b1d99f3e90b2b3520caa4262e318b63d3796339878752aaeb9ca636c57a5a9fc12ba621954acead99129d6e87d80674bdce027cd8e7e9865f1ca8ea66f41e298807447f89df5f9a701b42f9f153f43ee16d4e0e2ec7688ab68640553bd5db14c6d9469346e510ea31554537aca0a2108a353be41e1af12a62b78463576d5978d104f22e2b39296181c0a67e5d96f60ad5e1e2693ed37e1d20ed97712c0af5e774d30bf244bd6392a24cd2afdd1b44d856c5363006ccaad5fbd8a9b0afee03c1c326718a97b141297133267cbd128c45e753a6eff6d903e6c937322f72e62f1abe04d0c344eecc3e49b512bb1fe819b8a231502a3f1182bcc0387b0ad65342b97722330c2f271e5e9e21da40b59fd92af047dc4840f40e2c3f8b1fb8acb8cd33ac32e8d3d37eb60d682b45a2ff14623416330f978d90a07f1ec377ccb7ef8288d5ca8cfe31d486dfb748e53b42bb99d3eb674e5462bcb9ff3a8e1b2542780356073f75bb5dd110ac9670d89362ec6f29f988600da58b2d3d446f279e402b09ef4f3160ce5cd0e13861f735c40b7d0bc2b6447ce27b9aaf5c0358745e6e1f108eb1321fd0f4eb8cd5065ebf6bef9b7e097fb217eba65cc26c59e6553c2badfae570cc709cff0b32b398be68b19b4597e9889fc1163cc8e7a77a09cf3dcc63cbaee12c8be34a7eee47edc71bc11b91a939a7ca2dc5d305a1edddcc172f309873a2c8cbcb9caf8e11710e681b310f12678edd211fb3d0bb93c606253c5096c189e3be5cbc28633647e3d3b8ca14af6c76ce450b9258c241ef41d87f46cc33e790a1de*3*254*10*7*16*19424e6ddf44d9af244edc31e7090900*65536*fa31f69128e5fe9c", "abcdef"}, {NULL} }; static int *cracked; static int any_cracked; static cl_int cl_error; static gpg_password *inbuffer; static gpg_hash *outbuffer; static gpg_salt currentsalt; static cl_mem 
mem_in, mem_out, mem_setting; static struct fmt_main *self; static cl_kernel crypt_kernel_sha256, crypt_kernel_sha512; size_t insize, outsize, settingsize, cracked_size; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl_autotune.h" #include "memdbg.h" static const char *warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(gpg_password) * gws; outsize = sizeof(gpg_hash) * gws; settingsize = sizeof(gpg_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(1, cracked_size); // Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); // SHA-1 S2K HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); // SHA-256 S2K HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); 
HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); // SHA-512 S2K HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (cracked) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } } static void init(struct fmt_main *_self) { self = _self; Twofish_initialise(); opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%d -DSALT_LENGTH=%d", PLAINTEXT_LENGTH, SALT_LENGTH); opencl_init("$JOHN/kernels/gpg_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "gpg", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); crypt_kernel_sha256 = clCreateKernel(program[gpu_id], "gpg_sha256", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); crypt_kernel_sha512 = clCreateKernel(program[gpu_id], "gpg_sha512", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(gpg_password), 0, db); // Auto tune execution from shared/included code. 
autotune_run(self, 1, 0, 300); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static int valid(char *ciphertext, struct fmt_main *self) { return gpg_common_valid(ciphertext, self, 0); } static void set_salt(void *salt) { gpg_common_cur_salt = *(struct gpg_common_custom_salt **)salt; currentsalt.length = SALT_LENGTH; memcpy((char*)currentsalt.salt, gpg_common_cur_salt->salt, currentsalt.length); currentsalt.count = gpg_common_cur_salt->count; currentsalt.key_len = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); } #undef set_key static void set_key(char *key, int index) { uint32_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint32_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } // Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); // Run kernel if (gpg_common_cur_salt->hash_algorithm == HASH_SHA1) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); } else if (gpg_common_cur_salt->hash_algorithm == HASH_SHA256) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel_sha256, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); } else if (gpg_common_cur_salt->hash_algorithm == HASH_SHA512) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel_sha512, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); } // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (gpg_common_check(outbuffer[index].v, gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm))) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } /* * Report gpg --s2k-count n as 1st tunable cost, * hash algorithm as 2nd tunable cost, * cipher algorithm as 3rd tunable cost. 
*/

/*
 * Format descriptor registered with John the Ripper.  Ties together the
 * format metadata, the tunable-cost reporters described above, the
 * embedded test vectors (gpg_tests) and the per-phase callbacks defined
 * earlier in this file.
 */
struct fmt_main fmt_opencl_gpg = {
	{
		/* --- params --- */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{
			"s2k-count", /* only for gpg --s2k-mode 3, see man gpg, option --s2k-count n */
			"hash algorithm [2:SHA1 8:SHA256 10:SHA512]",
			"cipher algorithm [1:IDEA 2:3DES 3:CAST5 4:Blowfish 7:AES128 8:AES192 9:AES256 10:Twofish 11:Camellia128 12:Camellia192 13:Camellia256]",
		},
		{ FORMAT_TAG },
		gpg_tests
	}, {
		/* --- methods --- */
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		gpg_common_get_salt,
		{
			/* tunable-cost extractors, same order as the strings above */
			gpg_common_gpg_s2k_count,
			gpg_common_gpg_hash_algorithm,
			gpg_common_gpg_cipher_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
adevs_par_simulator.h
/** * Copyright (c) 2013, James Nutaro * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the FreeBSD Project. 
* * Bugs, comments, and questions can be sent to nutaro@gmail.com */ #ifndef __adevs_par_simulator_h_ #define __adevs_par_simulator_h_ #include "adevs_abstract_simulator.h" #include "adevs_msg_manager.h" #include "adevs_lp.h" #include "adevs_lp_graph.h" #include <cassert> #include <cstdlib> #include <iostream> #include <vector> #include <cstdio> namespace adevs { /** * This is the conservative simulator described in "Building Software for Simulation". * Models, network and atomic, can be assigned to specific threads (processors) by calling the * setProc() method. The components of a network will inherit its thread assignment. * Model's with an explicit assignment must have a positive lookahead. Atomic models that are * unassigned, by inheritance or otherwise, must have a positive lookahead and will * be assigned randomly to a thread. Note that this simulator does not support dynamic * structure models. */ template <class X, class T = double> class ParSimulator: public AbstractSimulator<X,T> { public: /** * Create a simulator for the provided model. The Atomic components will * be assigned to the preferred processors, or assigned randomly if no * preference is given or the preference can not be satisfied. The * message manager is used to handle inter-thread events. If msg_manager * is NULL, the assignment and copy constructors of output objects * are used and their is no explicit cleanup (see the MessageManager * documentation). This constructor assumes all to all connection of the * processors. */ ParSimulator(Devs<X,T>* model, MessageManager<X>* msg_manager = NULL); /** * This constructor accepts a directed graph whose edges tell the * simulator which processes feed input to which other processes. * For example, a simulator with processors 1, 2, and 3 where 1 -> 2 * and 2 -> 3 would have two edges: 1->2 and 2->3. 
*/ ParSimulator(Devs<X,T>* model, LpGraph& g, MessageManager<X>* msg_manager = NULL); /// Get the model's next event time T nextEventTime(); /** * Execute the simulator until the next event time is greater * than the specified value. There is no global clock, * so this must be the actual time that you want to stop. */ void execUntil(T stop_time); /** * Deletes the simulator, but leaves the model intact. The model must * exist when the simulator is deleted, so delete the model only after * the simulator is deleted. */ ~ParSimulator(); private: LogicalProcess<X,T>** lp; int lp_count; MessageManager<X>* msg_manager; void init(Devs<X,T>* model); void init_sim(Devs<X,T>* model, LpGraph& g); }; template <class X, class T> ParSimulator<X,T>::ParSimulator(Devs<X,T>* model, MessageManager<X>* msg_manager): AbstractSimulator<X,T>(),msg_manager(msg_manager) { // Create an all to all coupling lp_count = omp_get_max_threads(); LpGraph g; for (int i = 0; i < lp_count; i++) { for (int j = 0; j < lp_count; j++) { if (i != j) { g.addEdge(i,j); g.addEdge(j,i); } } } init_sim(model,g); } template <class X, class T> ParSimulator<X,T>::ParSimulator(Devs<X,T>* model, LpGraph& g, MessageManager<X>* msg_manager): AbstractSimulator<X,T>(),msg_manager(msg_manager) { init_sim(model,g); } template <class X, class T> void ParSimulator<X,T>::init_sim(Devs<X,T>* model, LpGraph& g) { if (msg_manager == NULL) msg_manager = new NullMessageManager<X>(); lp_count = g.getLPCount(); if (omp_get_max_threads() < lp_count) { char buffer[1000]; sprintf(buffer,"More LPs than threads. 
Set OMP_NUM_THREADS=%d.", lp_count); exception err(buffer); throw err; } omp_set_num_threads(lp_count); lp = new LogicalProcess<X,T>*[lp_count]; for (int i = 0; i < lp_count; i++) { lp[i] = new LogicalProcess<X,T>(i,g.getI(i),g.getE(i), lp,this,msg_manager); } init(model); } template <class X, class T> T ParSimulator<X,T>::nextEventTime() { Time<T> tN = Time<T>::Inf(); for (int i = 0; i < lp_count; i++) { if (lp[i]->getNextEventTime() < tN) tN = lp[i]->getNextEventTime(); } return tN.t; } template <class X, class T> ParSimulator<X,T>::~ParSimulator() { for (int i = 0; i < lp_count; i++) delete lp[i]; delete [] lp; delete msg_manager; } template <class X, class T> void ParSimulator<X,T>::execUntil(T tstop) { #pragma omp parallel { lp[omp_get_thread_num()]->run(tstop); } } template <class X, class T> void ParSimulator<X,T>::init(Devs<X,T>* model) { if (model->getProc() >= 0 && model->getProc() < lp_count) { lp[model->getProc()]->addModel(model); return; } Atomic<X,T>* a = model->typeIsAtomic(); if (a != NULL) { int lp_assign = a->getProc(); if (lp_assign < 0 || lp_assign >= lp_count) lp_assign = ((unsigned long int)(a)^(unsigned long int)(this))%lp_count; lp[lp_assign]->addModel(a); } else { Set<Devs<X,T>*> components; model->typeIsNetwork()->getComponents(components); typename Set<Devs<X,T>*>::iterator iter = components.begin(); for (; iter != components.end(); iter++) { init(*iter); } } } } // end of namespace #endif
GB_binop__bxor_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxor_uint64 // A.*B function (eWiseMult): GB_AemultB__bxor_uint64 // A*D function (colscale): GB_AxD__bxor_uint64 // D*A function (rowscale): GB_DxB__bxor_uint64 // C+=B function (dense accum): GB_Cdense_accumB__bxor_uint64 // C+=b function (dense accum): GB_Cdense_accumb__bxor_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxor_uint64 // C=scalar+B GB_bind1st__bxor_uint64 // C=scalar+B' GB_bind1st_tran__bxor_uint64 // C=A+scalar GB_bind2nd__bxor_uint64 // C=A'+scalar GB_bind2nd_tran__bxor_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) ^ (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT64 || GxB_NO_BXOR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// (disabled: BXOR is not one of the accumulable dense-ewise3 operators)
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bxor_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__bxor_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bxor_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bxor_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        uint64_t bij = Bx [p] ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with
// scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bxor_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (x) ^ (aij) ;         \
}

GrB_Info GB_bind1st_tran__bxor_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore the definition for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (aij) ^ (y) ;         \
}

GrB_Info GB_bind2nd_tran__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hstio.c
/*
** Allen Farris - Original Implementation.
**
** M.D. De La Pena, 25 February 1998: Modified use of "long" to "int" to
** enforce compatibility with SPP/IRAF. Removed numeric.h - not used.
**
** Version 2.0, 23 March 1998:
** M.D. De La Pena: Added structures SingleGroupLine, FloatHdrLine, and
** ShortHdrLine. Also, new functions to support obtaining single lines
** from a SingleGroup.
**
** 07 April 1998:
** M.D. De La Pena: Added functionality to output a subsection of each image
** of an imset to a file where the subsection is the full size (naxis1/naxis2)
** of the output image.
**
** 26 August 1998:
** M.D. De La Pena: Modified getSingleGroupLine to increment the line number
** by 1. The public interface should have the line numbers from zero through
** (size of data - 1). However, the IRAF routines to acquire lines require
** the line numbers to range from one through (size of data).
**
** 30 September 1998:
** M.D. De La Pena: Removed EXTVER parameter from getSingleGroupLine since
** it was unnecessary.
**
** 07 October 1998
** M.D. De La Pena: Modified putSect[Float/Short]HD to update the WCS keywords
** (LTV[1/2] and CRPIX[1/2]), if these keywords are present in the header.
** A new supporting routine, updateWCS, performs the update.
**
** 09 November 1998:
** M.D. De La Pena: The following routine names have been updated to append
** "Sect" to the end of the name rather than using it as a prefix - consistency
** request: put[Sci/Err/DQ]Sect, put[Float/Short]HDSect, putSingleGroupSect,
** and put[Float/Short]Sect.
**
** 12 November 1998:
** M.D. De La Pena: Added use of the HSTIO_VERSION macro so that a search can
** be done on the HSTIO library or the executable using the HSTIO library to
** determine the version. Corrected an omitted call to "initHdr" in
** allocSingleGroup, allocSingleGroupLine, allocMultiGroup, and
** allocMultiNicmosGroup.
**
** 28 December 1998:
** M.D. De La Pena: Modified getSingleGroup and openSingleGroupLine to avoid
** allocating pointers to space of zero length; now agrees with code from
** getSingleNicmosGroup.
**
** 20 May 1999:
** R.L. Williamson: Modified type for updateWCS from int to void.
**
** 19 August 1999: M.D. De La Pena - Added void to functions which have no
** parameters.
**
** 13 January 2000:
** M.D. De La Pena - Modified freeMultiGroup and freeMultiNicmosGroup to
** handle structures which have been initialized, but never allocated before
** they are freed.
**
** 24 February 2000:
** M.D. De La Pena - Modified getSingleGroupLine such that the input line
** number (which should be zero-based) is no longer incremented by one in
** this routine. The increment by one is done in get[Float/Short]Line.
** Routine initSingleGroupLine() now initializes line_num attribute to -1.
**
** 16 February 2007:
** H.A. Bushouse - Added putSingleNicmosGroupSect routine, patterned after
** the existing putSingleGroupSect, and the supporting putSmplSect and
** putIntgSect routines.
**
** M. Droettboom, January 2010:
** Change to use CFITSIO rather than IRAF IMIO routines.
**
** Phil Hodge, May 2011:
** In putHeader, check status after calling fits_update_key or fits_delete_key.
** For fits_update_key, if status is non-zero return -1. For fits_delete_key,
** however, if status is KEY_NO_EXIST, clear the error messages and set status
** to 0.
**
** Sara Ogaz, April 2017
** Update putHeader: fits_update_keyword should update the keyword and
** comment values if they exist or append new values, and it should be
** a NULL pointer to leave the comment field alone. putHeader was supplying
** the empty string directly, this has been changed to NULL.
**
**
** Table of Contents
**
** Section 1.
** Defines that isolate data type dependencies between Sci, DQ, and Err.
** Section 2.
** Declarations and functions related to error handling.
** Section 3.
** Functions that initialize, allocate, and free storage.
** Section 4.
** Low-level I/O functions.
** Section 5.
** High-level I/O Functions.
** Section 6.
** Functions to manipulate the header array.
*/

# include <fitsio.h>
# include <ctype.h>
# include <stdio.h>
# include <string.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <time.h>
# include <unistd.h>
# include <assert.h>
# include <stdlib.h>
# include <stdbool.h>

# include "hstio.h"
# include "hstcalerr.h"

/* Global status tracker */
/* NOTE(review): a file-global named "status" is easy to shadow and is not
** thread safe -- confirm all users expect this shared state. */
int status;

/* fclose() that tolerates a NULL stream (treated as success). */
int fcloseNull(FILE * stream)
{
    if (!stream)
        return 0;
    return fclose(stream);
}

/* Close *stream (if open) and always NULL the pointer afterwards.
** Returns HSTCAL_OK on success, IO_ERROR if fclose failed. */
int fcloseWithStatus(FILE ** stream)
{
    int ret = HSTCAL_OK;
    if (fcloseNull(*stream))
        ret = IO_ERROR;
    // Whether or not the operation succeeds, the stream is no longer
    // associated with a file, and any buffer installed by setbuf or
    // setvbuf is also disassociated and deallocated if automatic
    // allocation was used.
    *stream = NULL;
    return ret;
}

/*
** String defined to allow determination of the HSTIO library version
** from the library file (*.a) or the executable using the library.
*/
const char *hstio_version = HSTIO_VERSION;

/*
** Section 1.
** Defines that isolate data type dependencies between Sci, DQ, and Err
** and private data declarations.
*/

/* Per-open-HDU bookkeeping for the CFITSIO-backed I/O layer. */
typedef struct {
        /* CFITSIO TODO: Remove redundant members here */
        fitsfile *ff;           /* Ptr to cfitsio fitsfile object */
        char *filename;         /* File name. */
        char *extname;          /* FITS EXTNAME value. */
        int extver;             /* FITS EXTVER value. */
        int hflag;              /* Flag indicating header update. */
        Hdr *hdr;               /* Address of header lines. */
        long dims[2];           /* FITS NAXIS values. */
        int type;               /* FITS data type. */
        unsigned int options;   /* I/O options. */
} IODesc;

/* The default allocation unit for Header arrays */
# define HdrUnit 36

/*
** Section 2.
** Declarations and functions related to error handling.
*/ # define ERRLINEWIDTH 2048 static HSTIOError error_status; static char error_msg[ERRLINEWIDTH]; static HSTIOErrHandler errhandler[32]; static int max_err_handlers = 32; static int errtop = -1; HSTIOError hstio_err(void) { return error_status; } char *hstio_errmsg(void) { return error_msg; } int getNumHDUs(const char * fileName, int * hduNum) { *hduNum = 0; assert(fileName && *fileName!='\0'); fitsfile * fptr = NULL; int tmpStatus = HSTCAL_OK; fits_open_file(&fptr, fileName, READONLY, &tmpStatus); if (tmpStatus) return tmpStatus; if (!fptr) return OPEN_FAILED; if (fits_get_num_hdus(fptr, hduNum, &tmpStatus)) { int closeStatus = HSTCAL_OK; fits_close_file(fptr, &closeStatus); return tmpStatus; } fits_close_file(fptr, &tmpStatus); return HSTCAL_OK; } int findTotalNumberOfImsets(const char * fileName, const char * setContainsExtName, int * total) { return findTotalNumberOfHDUSets(fileName, setContainsExtName, IMAGE_HDU, total); } int findTotalNumberOfHDUSets(const char * fileName, const char * setContainsExtName, const int hduType, int * total) { *total = 0; assert(fileName && *fileName!='\0'); int hduNum = 0; int tmpStatus = HSTCAL_OK; if ((tmpStatus = getNumHDUs(fileName, &hduNum))) return tmpStatus; if (hduNum < 1) return HSTCAL_OK; // open file fitsfile * fptr = NULL; fits_open_file(&fptr, fileName, READONLY, &tmpStatus); if (tmpStatus) return tmpStatus; if (!fptr) return OPEN_FAILED; // Determine which method to use: // nimsets = [hdu.name for hdu in hduList].count(setContainsExtName) // OR // nimsets = len(set([hdu.ver for hdu in hduList])) const bool usingExtName = setContainsExtName && *setContainsExtName != '\0' ? true : false; const char * key = usingExtName ? 
"EXTNAME" : "EXTVER"; int encounteredList[hduNum]; // used for nimsets = len(set([hdu.ver for hdu in hduList])) unsigned encounteredListCursor = 0; // used for nimsets = len(set([hdu.ver for hdu in hduList])) // open each HDU and count {unsigned i; for (i = 1; i <= hduNum; ++i) // HDUs are 1 based { int loopStatus = HSTCAL_OK; // decl here to auto reset int extHDUType = ANY_HDU; // This is populated by fits_movabs_hdu() but init anyhow if (fits_movabs_hdu(fptr, i, &extHDUType, &loopStatus)) { // Since we already know the total number of HDUs, if we can't // move through all of them, a real IO error has occurred. int closeStatus = HSTCAL_OK; fits_close_file(fptr, &closeStatus); return loopStatus; } // Check for HDU type correctness if (hduType != ANY_HDU && extHDUType != hduType) continue; // Get keyword value char keyValue[FLEN_VALUE]; if (fits_read_key(fptr, TSTRING, key, keyValue, NULL, &loopStatus)) { if (loopStatus == KEY_NO_EXIST || loopStatus == VALUE_UNDEFINED) continue; // ignore missing keys and empty values of EXTVER and EXTNAME int closeStatus = HSTCAL_OK; fits_close_file(fptr, &closeStatus); return loopStatus; } if (usingExtName) { // (python) nimsets = [hdu.name for hdu in hduList].count(setContainsExtName) int match = FALSE; int exact = FALSE; int caseSensitive = TRUE; fits_compare_str(setContainsExtName, keyValue, caseSensitive, &match, &exact); if (match || exact) (*total)++; } else { // (python) nimsets = len(set([hdu.ver for hdu in hduList])) int extVer = atoi(keyValue); bool alreadyCounted = false; // Ugly, but list size should be small so who cares {unsigned j; for (j = 0; j < encounteredListCursor; ++j) { if (extVer == encounteredList[j]) { alreadyCounted = true; break; } }} // Add to list and inc. 
total if (!alreadyCounted) { encounteredList[encounteredListCursor++] = extVer; (*total)++; } } }} fits_close_file(fptr, &tmpStatus); return HSTCAL_OK; } int push_hstioerr(HSTIOErrHandler x) { if (errtop == (max_err_handlers - 1)) return -1; ++errtop; errhandler[errtop] = x; return errtop + 1; } int pop_hstioerr(void) { if (errtop == -1) return -1; --errtop; return errtop + 1; } static void clear_err(void) { error_status = HSTOK; error_msg[0] = '\0'; } void clear_hstioerr(void) { error_status = HSTOK; error_msg[0] = '\0'; } void error(HSTIOError e, char *str) { int n; error_status = e; if (str != 0) { n = strlen(str); strncpy(error_msg,str,(n > ERRLINEWIDTH ? ERRLINEWIDTH : n)); error_msg[n] = '\0'; } switch(error_status) { /* Do not make these messages longer than 80 chars. */ case HSTOK: error_msg[0] = '\0'; break; case NOMEM: strcat(error_msg,"\nNo memory left to allocate data."); break; case BADOPEN: strcat(error_msg,"\nError opening image array."); break; case BADCLOSE: strcat(error_msg,"\nError closing image array."); break; case BADREAD: strcat(error_msg,"\nError reading image array."); break; case BADWRITE: strcat(error_msg,"\nError writing image array."); break; case BADEXTNAME: strcat(error_msg,"\nInvalid EXTNAME name"); break; case BADHSIZE: strcat(error_msg,"\nInvalid size for header array."); break; case NOGET: strcat(error_msg,"\nIncorrect I/O mode for get operation."); break; case NOPUT: strcat(error_msg,"\nIncorrect I/O mode for put operation."); break; case BADDIMS: strcat(error_msg,"\nImage has wrong number of dimensions."); break; case BADTYPE: strcat(error_msg,"\nImage has wrong data type."); break; case NOSCI: strcat(error_msg,"\nNo Sci array corresponding to DQ or Err arrays"); break; case BADSCIDIMS: strcat(error_msg,"\nSci array has wrong number of dimensions."); break; case BADGROUP: strcat(error_msg,"\nGroup number is out of range."); break; case BADGET: strcat(error_msg,"\nKeyword specified in get_Kw function was not found."); break; case 
BADFITSEQ: strcat(error_msg,"\nFITS card has no value indicator."); break; case BADFITSQUOTE: strcat(error_msg,"\nFITS card has no ending quote."); break; case BADFITSNUMERIC: strcat(error_msg,"\nFITS card has invalid numeric field."); break; case BADFITSTYPE: strcat(error_msg,"\nWrong data type specified in get_Kw function."); break; case BADPUT: strcat(error_msg,"\nKeyword specified in put_Kw function was not found."); break; case BADNAME: strcat(error_msg,"\nKeyword name specified in add_Kw function is too long."); break; case BADBITPIX: strcat(error_msg,"\nWrong data type specified in making primary array or image extension."); break; case BADNDIM: strcat(error_msg,"\nWrong number of dimensions in making primary array or image extension."); break; case BADEXIST: strcat(error_msg,"\nFile already exists. Operation would overwrite existing file."); break; case BADREMOVE: strcat(error_msg,"\nError removing file."); break; } if (errtop > -1 && errhandler[errtop] != 0) errhandler[errtop](); } static void ioerr(HSTIOError e, IODescPtr x_, int status) { IODesc *x; char cfitsio_errmsg[81]; x = (IODesc *)x_; sprintf(&error_msg[strlen(error_msg)], "Filename %s EXTNAME %s EXTVER %d CFITSIO status %d\n", x->filename, x->extname, x->extver, status); while (fits_read_errmsg(cfitsio_errmsg)) { strncat(error_msg, cfitsio_errmsg, 80); } error(e,0); } /* ** Section 3. ** Functions that initialize, allocate, and free storage in data structures. 
*/

/* Reset a FloatTwoDArray to the empty state (no buffer owned). */
void initFloatData(FloatTwoDArray *x) {
    x->buffer = NULL;
    x->buffer_size = 0;
    x->tot_nx = 0;
    x->tot_ny = 0;
    x->nx = 0;
    x->ny = 0;
    x->storageOrder = ROWMAJOR;
    x->data = NULL;
# if defined (DEBUG)
    printf("initFloatData: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
}

/* Ensure x holds an i x j float buffer; an existing buffer of the same
** element count is reused. Returns 0 on success, -1 on failure (x is
** re-initialized and NOMEM is raised). */
int allocFloatData(FloatTwoDArray *x, int i, int j, Bool zeroInitialize) {
    //WARNING: target (x) must be initialized by caller
# if defined (DEBUG)
    printf("allocFloatData-1: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    if (x->buffer == NULL || x->buffer_size != (i * j)) {
        if (x->buffer != NULL) {
            free(x->buffer);
            x->buffer = NULL;
        }
        x->buffer_size = i * j;
        if (zeroInitialize)
            x->buffer = calloc(x->buffer_size, sizeof(*x->buffer));
        else
            x->buffer = malloc(x->buffer_size * sizeof(*x->buffer));
        if (x->buffer == NULL) {
            initFloatData(x);
            error(NOMEM,"Allocating SciData");
            return -1;
        }
    }
    x->tot_nx = i;
    x->tot_ny = j;
    x->nx = x->tot_nx;
    x->ny = x->tot_ny;
    x->data = x->buffer;
# if defined (DEBUG)
    printf("allocFloatData-2: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    return 0;
}

/* Free the buffer (if any) and reset x; NULL x is tolerated. */
void freeFloatData(FloatTwoDArray *x) {
    if (!x) return;
# if defined (DEBUG)
    printf("freeFloatData: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    if (x->buffer != NULL)
        free(x->buffer);
    initFloatData(x);
}

/* Copy source into target, converting storage order when requested.
** Returns 0 on success, -1 for NULL args, ALLOCATION_PROBLEM otherwise. */
int copyFloatData(FloatTwoDArray * target, const FloatTwoDArray * source, enum StorageOrder targetStorageOrder) {
    if (!target || !source)
        return -1; // should this check be raised higher up the call stack too?
    if (targetStorageOrder != source->storageOrder)
    {
        //assumes target initialized
        if (!target->buffer)
        {
            if (allocFloatData(target, source->nx, source->ny, False))
                return ALLOCATION_PROBLEM; //allocFloatData() initializes before returning
        }
        return swapFloatStorageOrder(target, source, targetStorageOrder);
        //fall through and copy normally
    }
    if (allocFloatData(target, source->nx, source->ny, False))
        return ALLOCATION_PROBLEM; //allocFloatData() initializes before returning
    //allocFloatData() correctly initializes all other members leaving only buffer (data points to buffer)
    memcpy(target->buffer, source->buffer, source->nx*source->ny*sizeof(*source->buffer));
    return 0;
}

/* Copy source into target transposing between row- and column-major
** element layouts when the orders differ; target->data must already be
** allocated to the same element count. */
int swapFloatStorageOrder(FloatTwoDArray * target, const FloatTwoDArray * source, enum StorageOrder targetStorageOrder)
{
    //this probably breaks use of Pix on target? Do we need to swap nx & ny?
    if (!target || !source)
        return -1;
    target->storageOrder = targetStorageOrder;
    if (targetStorageOrder == source->storageOrder)
        return 0;
    const unsigned nRows = target->ny;
    const unsigned nCols = target->nx;
    {unsigned j;
    for (j = 0; j < nCols; ++j)
    {
        {unsigned i;
        for (i = 0; i < nRows; ++i)
        {
            if (targetStorageOrder == COLUMNMAJOR)
                target->data[j*nRows + i] = source->data[i*nCols + j];
            else
                target->data[i*nCols + j] = source->data[j*nRows + i];
        }}
    }}
    return 0;
}

/* Short variant of swapFloatStorageOrder (same transpose logic). */
int swapShortStorageOrder(ShortTwoDArray * target, const ShortTwoDArray * source, enum StorageOrder targetStorageOrder)
{
    //this probably breaks use of Pix on target? Do we need to swap nx & ny?
    if (!target || !source)
        return -1;
    target->storageOrder = targetStorageOrder;
    if (targetStorageOrder == source->storageOrder)
        return 0;
    const unsigned nRows = target->ny;
    const unsigned nCols = target->nx;
    {unsigned j;
    for (j = 0; j < nCols; ++j)
    {
        {unsigned i;
        for (i = 0; i < nRows; ++i)
        {
            if (targetStorageOrder == COLUMNMAJOR)
                target->data[j*nRows + i] = source->data[i*nCols + j];
            else
                target->data[i*nCols + j] = source->data[j*nRows + i];
        }}
    }}
    return 0;
}

/* Reset a ShortTwoDArray to the empty state (no buffer owned). */
void initShortData(ShortTwoDArray *x) {
    x->buffer = NULL;
    x->buffer_size = 0;
    x->tot_nx = 0;
    x->tot_ny = 0;
    x->nx = 0;
    x->ny = 0;
    x->storageOrder = ROWMAJOR;
    x->data = NULL;
# if defined (DEBUG)
    printf("initShortData: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
}

/* Short variant of allocFloatData.
** NOTE(review): unlike allocFloatData, the freed buffer is not set to
** NULL before reallocating -- harmless here because a failed allocation
** calls initShortData, but inconsistent with the float version. */
int allocShortData(ShortTwoDArray *x, int i, int j, Bool zeroInitialize) {
# if defined (DEBUG)
    printf("allocShortData-1: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    if (x->buffer == NULL || x->buffer_size != (i * j)) {
        if (x->buffer != NULL)
            free(x->buffer);
        x->buffer_size = i * j;
        if (zeroInitialize)
            x->buffer = calloc(x->buffer_size, sizeof(*x->buffer));
        else
            x->buffer = malloc(x->buffer_size * sizeof(*x->buffer));
        if (x->buffer == NULL) {
            initShortData(x);
            error(NOMEM,"Allocating DQData");
            return -1;
        }
    }
    x->tot_nx = i;
    x->tot_ny = j;
    x->nx = x->tot_nx;
    x->ny = x->tot_ny;
    x->data = x->buffer;
# if defined (DEBUG)
    printf("allocShortData-2: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    return 0;
}

/* Free the buffer (if any) and reset x.
** NOTE(review): no NULL-argument guard, unlike freeFloatData -- confirm
** callers never pass NULL. */
void freeShortData(ShortTwoDArray *x) {
# if defined (DEBUG)
    printf("freeShortData: %x %x %d\n", (int)x,(int)(x->buffer),x->buffer_size);
# endif
    if (x->buffer != NULL)
        free(x->buffer);
    initShortData(x);
}

/* Short variant of copyFloatData. */
int copyShortData(ShortTwoDArray * target, const ShortTwoDArray * source, enum StorageOrder targetStorageOrder)
{
    if (!target || !source)
        return -1; // should this check be raised higher up the call stack too?
    if (targetStorageOrder != source->storageOrder)
    {
        //assumes target initialized
        if (!target->buffer)
        {
            if (allocShortData(target, source->nx, source->ny, False))
                return ALLOCATION_PROBLEM; //allocShortData() initializes before returning
        }
        return swapShortStorageOrder(target, source, targetStorageOrder);
        //fall through and copy normally
    }
    if (allocShortData(target, source->nx, source->ny, False))
        return ALLOCATION_PROBLEM; //allocShortData() initializes before returning
    //allocShortData() correctly initializes all other members leaving only buffer (data points to buffer)
    memcpy(target->buffer, source->buffer, source->nx*source->ny*sizeof(*source->buffer));
    return 0;
}

/* Reset a float header line to the empty state. */
void initFloatLine (FloatHdrLine *x) {
    x->line = NULL;
    x->tot_nx = 0;
}

/* Ensure x->line holds i zero-initialized floats; reuses a buffer of
** matching size. Returns 0 on success, -1 on failure. */
int allocFloatLine (FloatHdrLine *x, int i) {
# if defined (DEBUG)
    printf("allocFloatLine-1: %x %d\n", (int)(x->line),i);
# endif
    if (x->line == NULL || x->tot_nx != i) {
        if (x->line != NULL)
            free (x->line);
        x->tot_nx = i;
        x->line = (float *) calloc (x->tot_nx, sizeof(float));
        if (x->line == NULL) {
            initFloatLine (x);
            error (NOMEM,"Allocating float line");
            return (-1);
        }
    }
# if defined (DEBUG)
    printf("allocFloatLine-2: %x %d\n", (int)(x->line),x->tot_nx);
# endif
    return (0);
}

/* Free the line buffer (if any) and reset x. */
void freeFloatLine (FloatHdrLine *x) {
# if defined (DEBUG)
    printf("freeFloatLine: %x %x %d\n", (int)x,(int)(x->line),x->tot_nx);
# endif
    if (x->line != NULL)
        free (x->line);
    initFloatLine (x);
}

/* Reset a short header line to the empty state. */
void initShortLine (ShortHdrLine *x) {
    x->line = NULL;
    x->tot_nx = 0;
}

/* Short variant of allocFloatLine. */
int allocShortLine (ShortHdrLine *x, int i) {
# if defined (DEBUG)
    printf("allocShortLine-1: %x %d\n", (int)(x->line),x->tot_nx);
# endif
    if (x->line == NULL || x->tot_nx != i) {
        if (x->line != NULL)
            free (x->line);
        x->tot_nx = i;
        x->line = (short *) calloc (x->tot_nx, sizeof(short));
        if (x->line == NULL) {
            initShortLine (x);
            error (NOMEM,"Allocating short line");
            return (-1);
        }
    }
# if defined (DEBUG)
    printf("allocShortLine-2: %x %d\n", (int)(x->line),x->tot_nx);
# endif
    return (0);
}

/* Free the line buffer (if any) and reset x. */
void freeShortLine (ShortHdrLine *x) {
# if defined (DEBUG)
    printf("freeShortLine: %x %x %d\n", (int)x,(int)(x->line),x->tot_nx);
# endif
    if (x->line != NULL)
        free (x->line);
    initShortLine (x);
}

/* Reset a header to the empty state (no card array owned). */
void initHdr(Hdr *h) {
# if defined (DEBUG)
    printf("initHdr: %x %d %d %x\n", (int)h,h->nlines,h->nalloc,(int)(h->array));
# endif
    h->nlines = 0;
    h->nalloc = 0;
    h->array = NULL;
}

/* Ensure h can hold n header cards; nlines is reset to 0 either way.
** Returns 0 on success, -1 on failure. */
int allocHdr(Hdr *h, int n, Bool zeroInitialize) {
# if defined (DEBUG)
    printf("allocHdr-1: %x %d %d %x %d\n", (int)h,h->nlines,h->nalloc,(int)(h->array),n);
# endif
    h->nlines = 0;
    if (h->array == NULL || h->nalloc != n) {
        if (h->array != NULL)
            free(h->array);
        h->nalloc = n;
        if (zeroInitialize)
            h->array = calloc(n,sizeof(*h->array));
        else
            h->array = malloc(n * sizeof(*h->array));
        if (h->array == NULL) {
            h->nalloc = 0;
            error(NOMEM,"Allocating Hdr");
            return -1;
        }
    }
# if defined (DEBUG)
    printf("allocHdr-2: %x %d %d %x %d\n", (int)h,h->nlines,h->nalloc,(int)(h->array),n);
# endif
    return 0;
}

/* Grow h to hold n cards, preserving the existing nlines cards.
** Fails (-1) when h is unallocated or n is not larger than nalloc.
** NOTE(review): the error message text "Re-llocating Hdr" contains a
** typo; it is a runtime string, so it is left unchanged here. */
int reallocHdr(Hdr *h, int n) {
    int i;
    HdrArray *tmp;
# if defined (DEBUG)
    printf("reallocHdr-1: %x %d %d %x %d\n", (int)h,h->nlines,h->nalloc,(int)(h->array),n);
# endif
    if (h->array == NULL || n <= h->nalloc)
        return -1;
    tmp = (HdrArray *)calloc(n,sizeof(HdrArray));
    if (tmp == NULL) {
        error(NOMEM,"Re-llocating Hdr");
        return -1;
    }
    h->nalloc = n;
    for (i = 0; i < h->nlines; ++i)
        strcpy(tmp[i],h->array[i]);
    free(h->array);
    h->array = tmp;
# if defined (DEBUG)
    printf("reallocHdr-2: %x %d %d %x %d\n", (int)h,h->nlines,h->nalloc,(int)(h->array),n);
# endif
    return 0;
}

/* Free the card array (if any) and reset h; NULL h is tolerated. */
void freeHdr(Hdr *h) {
# if defined (DEBUG)
    printf("freeHdr: %x %d %d %x\n", (int)h,h->nlines,h->nalloc,(int)(h->array));
# endif
    if (!h) return;
    if (h->array)
        free(h->array);
    initHdr(h);
}

/* Copy a header (card array and line count). Returns 0 or -1. */
int copyHdr(Hdr *to, const Hdr *from) {
    if (!to || !from)
        return -1;
    //allocHdr only allocates if to->array == NULL or the sizes differ
    if (allocHdr(to,from->nalloc, False))
        return -1;
    memcpy(to->array, from->array, to->nalloc*sizeof(*to->array));
    to->nlines = from->nlines;
    return 0;
}

/*
** The above are the basic cases, now for the composite cases.
*/

/* Reset a FloatHdrData (header + 2-D data + section) to empty. */
void initFloatHdrData(FloatHdrData *x) {
    x->iodesc = NULL;
    x->section.x_beg = 0;
    x->section.y_beg = 0;
    x->section.sx = 0;
    x->section.sy = 0;
    initHdr(&(x->hdr));
    initFloatData(&(x->data));
}

/* Allocate an i x j data array plus a default-size header; the section
** is set to cover the whole array. Returns 0 or -1. */
int allocFloatHdrData(FloatHdrData *x, int i, int j, Bool zeroInitialize) {
    if (allocFloatData(&(x->data),i,j, zeroInitialize))
        return -1;
    if (allocHdr(&(x->hdr),HdrUnit, zeroInitialize))
        return -1;
    x->section.x_beg = 0;
    x->section.y_beg = 0;
    x->section.sx = i;
    x->section.sy = j;
    return 0;
}

/* Deep-copy src into target (header and data); the iodesc pointer is
** shared, not duplicated. */
int copyFloatHdrData(FloatHdrData * target, const FloatHdrData * src, enum StorageOrder targetStorageOrder)
{
    if (!target || !src)
        return -1;
    target->iodesc = src->iodesc;
    //Since DataSection section refers to image IO, keep as source (I think?).
    copyDataSection(&target->section, &src->section);//No allocations
    if (copyHdr(&target->hdr, &src->hdr))//This allocates
        return ALLOCATION_PROBLEM;
    return copyFloatData(&target->data, &src->data, targetStorageOrder);
}

/* Free data and header, then reset the structure. */
void freeFloatHdrData(FloatHdrData *x) {
    freeFloatData(&(x->data));
    freeHdr(&(x->hdr));
    initFloatHdrData(x);
}

/* Field-by-field copy of a DataSection (no allocations). */
void copyDataSection(DataSection * dest, const DataSection * src)
{
    dest->x_beg = src->x_beg;
    dest->y_beg = src->y_beg;
    dest->sx = src->sx;
    dest->sy = src->sy;
}

/* Reset a ShortHdrData (header + 2-D data + section) to empty. */
void initShortHdrData(ShortHdrData *x) {
    x->iodesc = NULL;
    x->section.x_beg = 0;
    x->section.y_beg = 0;
    x->section.sx = 0;
    x->section.sy = 0;
    initHdr(&(x->hdr));
    initShortData(&(x->data));
}

/* Short variant of allocFloatHdrData. */
int allocShortHdrData(ShortHdrData *x, int i, int j, Bool zeroInitialize) {
    if (allocShortData(&(x->data),i,j, zeroInitialize))
        return -1;
    if (allocHdr(&(x->hdr),HdrUnit, zeroInitialize))
        return -1;
    x->section.x_beg = 0;
    x->section.y_beg = 0;
    x->section.sx = i;
    x->section.sy = j;
    return 0;
}

/* Short variant of copyFloatHdrData. */
int copyShortHdrData(ShortHdrData * target, const ShortHdrData * src, enum StorageOrder targetStorageOrder)
{
    if (!target || !src)
        return -1;
    target->iodesc = src->iodesc;
    //Since DataSection section refers to image IO, keep as source (I think?).
    copyDataSection(&target->section, &src->section);//No allocations
    if (copyHdr(&target->hdr, &src->hdr))//This allocates
        return ALLOCATION_PROBLEM;
    return copyShortData(&target->data, &src->data, targetStorageOrder);
}

/* Free data and header, then reset the structure. */
void freeShortHdrData(ShortHdrData *x) {
    freeShortData(&(x->data));
    freeHdr(&(x->hdr));
    initShortHdrData(x);
}

/* Reset a FloatHdrLine to the empty state. */
void initFloatHdrLine (FloatHdrLine *x) {
    x->iodesc = NULL;
    initHdr (&(x->hdr));
    x->ehdr_loaded = False;
    x->tot_nx = 0;
    x->line = NULL;
}

/* Allocate the line buffer and a default-size header. */
int allocFloatHdrLine (FloatHdrLine *x, int i) {
    if (allocFloatLine (x, i))
        return (-1);
    if (allocHdr (&(x->hdr),HdrUnit, True))
        return (-1);
    return (0);
}

/* Free the line buffer and header, then reset the structure. */
void freeFloatHdrLine (FloatHdrLine *x) {
    if (x->line != NULL)
        free(x->line);
    freeHdr (&(x->hdr));
    initFloatHdrLine (x);
}

/* Reset a ShortHdrLine to the empty state. */
void initShortHdrLine (ShortHdrLine *x) {
    x->iodesc = NULL;
    initHdr (&(x->hdr));
    x->ehdr_loaded = False;
    x->tot_nx = 0;
    x->line = NULL;
}

/* Allocate the line buffer and a default-size header. */
int allocShortHdrLine (ShortHdrLine *x, int i) {
    if (allocShortLine (x, i))
        return (-1);
    if (allocHdr (&(x->hdr),HdrUnit, True))
        return (-1);
    return (0);
}

/* Free the line buffer and header, then reset the structure. */
void freeShortHdrLine (ShortHdrLine *x) {
    if (x->line != NULL)
        free(x->line);
    freeHdr (&(x->hdr));
    initShortHdrLine (x);
}

/* Reset a SingleGroup (sci/dq/err imset) to the empty state. */
void initSingleGroup(SingleGroup *x) {
    x->filename = NULL;
    x->group_num = 0;
    x->globalhdr = NULL;
    initFloatHdrData(&(x->sci));
    initShortHdrData(&(x->dq));
    initFloatHdrData(&(x->err));
}

/* Allocate the global header plus i x j sci, dq, and err arrays. */
int allocSingleGroup(SingleGroup *x, int i, int j, Bool zeroInitialize) {
    if (allocSingleGroupHeader(&x->globalhdr, zeroInitialize) ||
        allocFloatHdrData(&(x->sci),i,j, zeroInitialize) ||
        allocShortHdrData(&(x->dq),i,j, zeroInitialize) ||
        allocFloatHdrData(&(x->err),i,j, zeroInitialize))
        return ALLOCATION_PROBLEM;
    return 0;
}

/* Allocate and initialize *hdr when it is not already allocated. */
int allocSingleGroupHeader(Hdr ** hdr, Bool zeroInitialize)
{
    if (!hdr)
        return ALLOCATION_PROBLEM;
    if (*hdr)
        return 0; //Already allocated
    // NOTE(review): sizeof(*hdr) is the size of a POINTER (Hdr *), not of
    // a Hdr structure; this looks like an under-allocation and should
    // probably be sizeof(**hdr) -- confirm against the full source.
    if (zeroInitialize)
        *hdr = calloc(1,sizeof(*hdr));
    else
        *hdr = malloc(sizeof(*hdr));
    if (!*hdr)
        return ALLOCATION_PROBLEM;
    initHdr(*hdr);
    return
HSTCAL_OK; } int allocSingleGroupExts(SingleGroup *x, int i, int j, unsigned extension, Bool zeroInitialize) { if (allocSingleGroupHeader(&x->globalhdr, zeroInitialize)) return ALLOCATION_PROBLEM; if (extension & SCIEXT) { if (allocFloatHdrData(&(x->sci),i,j, zeroInitialize)) return ALLOCATION_PROBLEM; } if (extension & ERREXT) { if (allocFloatHdrData(&(x->err),i,j, zeroInitialize)) return ALLOCATION_PROBLEM; } if (extension & DQEXT) { if (allocShortHdrData(&(x->dq),i,j, zeroInitialize)) return ALLOCATION_PROBLEM; } return 0; } void setStorageOrder(SingleGroup * group, enum StorageOrder storageOrder) { if (!group) return; group->sci.data.storageOrder = storageOrder; group->err.data.storageOrder = storageOrder; group->dq.data.storageOrder = storageOrder; } void copyOffsetFloatData(float * output, const float * input, unsigned nRows, unsigned nColumns, unsigned outputOffset, unsigned inputOffset, unsigned outputSkipLength, unsigned inputSkipLength) { //WARNING - assumes row major storage {unsigned ithRow; #ifdef _OPENMP #pragma omp parallel for shared(output, input) private(ithRow) schedule(static) #endif for (ithRow = 0; ithRow < nRows; ++ithRow) memcpy(output + outputOffset + ithRow*outputSkipLength, input + inputOffset + ithRow*inputSkipLength, nColumns*sizeof(*output)); } } void copyOffsetShortData(short * output, const short * input, unsigned nRows, unsigned nColumns, unsigned outputOffset, unsigned inputOffset, unsigned outputSkipLength, unsigned inputSkipLength) { //WARNING - assumes row major storage {unsigned ithRow; #ifdef _OPENMP #pragma omp parallel for shared(output, input) private(ithRow) schedule(static) #endif for (ithRow = 0; ithRow < nRows; ++ithRow) memcpy(output + outputOffset + ithRow*outputSkipLength, input + inputOffset + ithRow*inputSkipLength, nColumns*sizeof(*output)); } } void copyOffsetSingleGroup(SingleGroup * output, const SingleGroup * input, unsigned nRows, unsigned nColumns, unsigned outputOffset, unsigned inputOffset, unsigned 
outputSkipLength, unsigned inputSkipLength) { if (!output || !input) return; //WARNING - assumes row major storage assert(output->sci.data.storageOrder == ROWMAJOR && output->sci.data.storageOrder == ROWMAJOR); //sci data if (output->sci.data.data && input->sci.data.data) copyOffsetFloatData(output->sci.data.data, input->sci.data.data, nRows, nColumns, outputOffset, inputOffset, outputSkipLength, inputSkipLength); //err data if (output->err.data.data && input->err.data.data) copyOffsetFloatData(output->err.data.data, input->err.data.data, nRows, nColumns, outputOffset, inputOffset, outputSkipLength, inputSkipLength); //dq data if (output->dq.data.data && input->dq.data.data) copyOffsetShortData(output->dq.data.data, input->dq.data.data, nRows, nColumns, outputOffset, inputOffset, outputSkipLength, inputSkipLength); } int copySingleGroup(SingleGroup * target, const SingleGroup * source, enum StorageOrder targetStorageOrder) { //WARNING assumes target pre allocated and initialized (entire tree). This way data can be copied to pre //allocated target, i.e. copy(a, b) .. do something .. copy(b, a) //NOTE: If structs contained total size we could just use malloc & memcpy and be done with it. 
if (!target || !source) return ALLOCATION_PROBLEM; setStorageOrder(target, targetStorageOrder); if (source->filename) { size_t filenameLength = strlen(source->filename)+1; if (!target->filename || (target->filename && strlen(target->filename) != filenameLength)) { if (target->filename) free(target->filename); target->filename = malloc(filenameLength*sizeof(*source->filename)); } if (!target->filename) { initSingleGroup(target); return ALLOCATION_PROBLEM; } memcpy(target->filename, source->filename, filenameLength); } target->group_num = source->group_num; copyHdr(target->globalhdr, source->globalhdr); //This allocates if (source->sci.data.data) { if (copyFloatHdrData(&target->sci, &source->sci, targetStorageOrder)) { initSingleGroup(target); return ALLOCATION_PROBLEM; } } if (source->err.data.data) { if (copyFloatHdrData(&target->err, &source->err, targetStorageOrder)) { initSingleGroup(target); return ALLOCATION_PROBLEM; } } if (source->dq.data.data) { if (copyShortHdrData(&target->dq, &source->dq, targetStorageOrder)) { initSingleGroup(target); return ALLOCATION_PROBLEM; } } return 0; } void freeSingleGroup(SingleGroup *x) { freeFloatHdrData(&(x->err)); freeShortHdrData(&(x->dq)); freeFloatHdrData(&(x->sci)); freeHdr(x->globalhdr); if (x->globalhdr != NULL) free(x->globalhdr); if (x->filename != NULL) free(x->filename); initSingleGroup(x); } void initMultiGroup(MultiGroup *x) { x->ngroups = 0; x->group = NULL; } int allocMultiGroup(MultiGroup *x, int n) { int i; if (x->group != NULL) freeMultiGroup(x); x->ngroups = n; x->group = (SingleGroup *)calloc(n,sizeof(SingleGroup)); if (x->group == NULL) { x->ngroups = 0; error(NOMEM,"Allocating MultiGroup"); return -1; } for (i = 0; i < x->ngroups; ++i) initSingleGroup(&(x->group[i])); x->group[0].globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->group[0].globalhdr == NULL) return -1; initHdr(x->group[0].globalhdr); for (i = 1; i < x->ngroups; ++i) x->group[i].globalhdr = x->group[0].globalhdr; return 0; } void 
freeMultiGroup(MultiGroup *x) { int i; if (x->group != NULL) { freeSingleGroup(&(x->group[0])); for (i = 1; i < x->ngroups; ++i) { x->group[i].globalhdr = NULL; x->group[i].filename = NULL; freeSingleGroup(&(x->group[i])); } } initMultiGroup(x); } void initSingleNicmosGroup(SingleNicmosGroup *x) { x->filename = NULL; x->group_num = 0; x->globalhdr = NULL; initFloatHdrData(&(x->sci)); initFloatHdrData(&(x->err)); initShortHdrData(&(x->dq)); initShortHdrData(&(x->smpl)); initFloatHdrData(&(x->intg)); } int allocSingleNicmosGroup(SingleNicmosGroup *x, int i, int j) { if (x->globalhdr == NULL) { x->globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->globalhdr == NULL) return -1; initHdr(x->globalhdr); } if (allocFloatHdrData(&(x->sci),i,j, True)) return -1; if (allocFloatHdrData(&(x->err),i,j, True)) return -1; if (allocShortHdrData(&(x->dq),i,j, True)) return -1; if (allocShortHdrData(&(x->smpl),i,j, True)) return -1; if (allocFloatHdrData(&(x->intg),i,j, True)) return -1; return 0; } void freeSingleNicmosGroup(SingleNicmosGroup *x) { freeFloatHdrData(&(x->intg)); freeShortHdrData(&(x->smpl)); freeShortHdrData(&(x->dq)); freeFloatHdrData(&(x->err)); freeFloatHdrData(&(x->sci)); freeHdr(x->globalhdr); if (x->globalhdr != NULL) free(x->globalhdr); if (x->filename != NULL) free(x->filename); initSingleNicmosGroup(x); } void initMultiNicmosGroup(MultiNicmosGroup *x) { x->ngroups = 0; x->group = NULL; } int allocMultiNicmosGroup(MultiNicmosGroup *x, int n) { int i; if (x->group != NULL) freeMultiNicmosGroup(x); x->ngroups = n; x->group = (SingleNicmosGroup *)calloc(n,sizeof(SingleNicmosGroup)); if (x->group == NULL) { x->ngroups = 0; error(NOMEM,"Allocating MultiNicmosGroup"); return -1; } for (i = 0; i < x->ngroups; ++i) initSingleNicmosGroup(&(x->group[i])); x->group[0].globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->group[0].globalhdr == NULL) return -1; initHdr(x->group[0].globalhdr); for (i = 1; i < x->ngroups; ++i) x->group[i].globalhdr = x->group[0].globalhdr; return 
0; } void freeMultiNicmosGroup(MultiNicmosGroup *x) { int i; if (x->group != NULL) { freeSingleNicmosGroup(&(x->group[0])); for (i = 1; i < x->ngroups; ++i) { x->group[i].globalhdr = NULL; x->group[i].filename = NULL; freeSingleNicmosGroup(&(x->group[i])); } } initMultiNicmosGroup(x); } void initSingleGroupLine (SingleGroupLine *x) { x->filename = NULL; x->group_num = 0; x->line_num = -1; x->phdr_loaded = False; x->globalhdr = NULL; initFloatHdrLine (&(x->sci)); initFloatHdrLine (&(x->err)); initShortHdrLine (&(x->dq)); } int allocSingleGroupLine (SingleGroupLine *x, int i) { if (x->globalhdr == NULL) { x->globalhdr = (Hdr *) calloc (1,sizeof(Hdr)); if (x->globalhdr == NULL) return (-1); initHdr(x->globalhdr); } if (allocFloatHdrLine (&(x->sci),i)) return (-1); if (allocFloatHdrLine (&(x->err),i)) return (-1); if (allocShortHdrLine (&(x->dq),i)) return (-1); return (0); } void freeSingleGroupLine (SingleGroupLine *x) { freeFloatHdrLine (&(x->sci)); freeFloatHdrLine (&(x->err)); freeShortHdrLine (&(x->dq)); freeHdr (x->globalhdr); if (x->globalhdr != NULL) free (x->globalhdr); if (x->filename != NULL) free (x->filename); initSingleGroupLine (x); } /* ** ** Allocate space for the lines of data from each extension of a Single ** ** Group */ int allocSciLine (SingleGroupLine *x) { IODesc *xio; xio = (IODesc *)(x->sci.iodesc); if (allocFloatLine (&(x->sci), xio->dims[0])) return (-1); return (0); } int allocErrLine (SingleGroupLine *x) { IODesc *xio; xio = (IODesc *)(x->err.iodesc); if (allocFloatLine (&(x->err), xio->dims[0])) return (-1); return (0); } int allocDQLine (SingleGroupLine *x) { IODesc *xio; xio = (IODesc *)(x->dq.iodesc); if (allocShortLine (&(x->dq), xio->dims[0])) return (-1); return (0); } /* ** Section 4. ** Low-level I/O functions. 
*/ char *getFilename(IODescPtr p) { return ((IODesc *)p)->filename; } char *getExtname(IODescPtr p) { return ((IODesc *)p)->extname; } int getExtver(IODescPtr p) { return ((IODesc *)p)->extver; } int getNaxis1(IODescPtr p) { return ((IODesc *)p)->dims[0]; } int getNaxis2(IODescPtr p) { return ((IODesc *)p)->dims[1]; } int getType(IODescPtr p) { return ((IODesc *)p)->type; } # include "c_iraf.h" /* ** Section 5. ** High-level I/O Functions. */ /* ** ** The function ckNewFile() checks to see whether a file exists or not. It ** takes a single argument, the full path name of the file. If there is a ** system environment variable "imclobber" that is set to "yes", the ** function then takes action to remove that file. In the case of VMS, if ** "imclobber" is set to "yes", the function removes all previous versions ** of the file. ** ** The function returns the following integer values: ** -1 = The file did exist but was removed. ** 0 = The file does not exist. ** 1 = The file exists and was not removed. ** 2 = The file exists and an error occured attempting to remove it. ** ** The function is used in the following manner. ** if (ckNewFile("filename.ext") > 0) { ** error("File already exists"); ** } ** ** The ckNewFile() function runs under both UNIX and VMS. 
*/ int ckNewFile(char *fname) { char *value; FILE *x = fopen(fname,"r"); if (x == NULL) return 0; /* file does not exist */ /* file exists */ fcloseWithStatus(&x); value = getenv("imclobber"); if (value == NULL) return 1; /* file exists and was not removed */ if ((strcmp(value,"yes") != 0) && (strcmp(value,"YES") != 0)) return 1; /* file exists and was not removed */ /* file exists and imclobber is yes */ if (remove(fname) != 0) return 2; while (remove(fname) == 0); /* The while loop is for VMS */ return -1; } int openFitsFile(char *filename, unsigned int option) { return 0; } int closeFitsFile(char *filename) { return 0; } /* ** Routine to open the input file, read in the primary header information, * ** acquire file pointers to the SingleGroup extensions, read the headers * ** of the extensions, and allocate space of the appropriate length * ** for the respective lines of data. Access to the SingleGroup extensions * ** remains open. * ** */ int openSingleGroupLine (char *fname, int ever, SingleGroupLine *x) { IODescPtr in; in = openInputImage(fname,"",0); if (hstio_err()) return (-1); if (x->globalhdr != NULL) free(x->globalhdr); if (x->filename != NULL) free(x->filename); x->filename = (char *) calloc ((strlen(fname) + 1),sizeof(char)); strcpy (x->filename,fname); x->globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->globalhdr == NULL) return -1; initHdr(x->globalhdr); getHeader (in,x->globalhdr); if (hstio_err()) return (-1); x->phdr_loaded = True; closeImage (in); x->group_num = ever; /* obtain the file pointers to the individual SingleGroup * * extensions, read the headers, and allocate the proper size * * storage for the line arrays. 
*/ getSciHdr (fname,ever,&(x->sci)); if (hstio_err()) return (-1); x->sci.ehdr_loaded = True; getErrHdr (fname,ever,&(x->err)); if (hstio_err()) return (-1); x->err.ehdr_loaded = True; getDQHdr (fname,ever,&(x->dq)); if (hstio_err()) return (-1); x->dq.ehdr_loaded = True; allocSciLine (x); allocErrLine (x); allocDQLine (x); clear_err(); return (0); } void closeSingleGroupLine (SingleGroupLine *x) { closeImage (x->sci.iodesc); closeImage (x->err.iodesc); closeImage (x->dq.iodesc); } int getFloatHD(char *fname, char *ename, int ever, FloatHdrData *x) { IODesc *xio; assert(x); x->iodesc = openInputImage(fname,ename,ever); xio = (IODesc *)(x->iodesc); if (hstio_err()) return -1; x->section.sx = xio->dims[0]; x->section.sy = xio->dims[1]; getHeader(x->iodesc,&(x->hdr)); if (hstio_err()) return -1; getFloatData(x->iodesc,&(x->data)); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } int putFloatHD(char *fname, char *ename, int ever, FloatHdrData *x, int option) { if (option == 0) x->iodesc = openOutputImage(fname, ename, ever, &(x->hdr), x->data.tot_nx, x->data.tot_ny, FITSFLOAT); else if (option & Overwrite) x->iodesc = openUpdateImage(fname, ename, ever, &(x->hdr)); if (hstio_err()) return -1; putFloatData(x->iodesc,&(x->data)); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } int getShortHD(char *fname, char *ename, int ever, ShortHdrData *x) { IODesc *xio; x->iodesc = openInputImage(fname,ename,ever); xio = (IODesc *)(x->iodesc); if (hstio_err()) return -1; x->section.sx = xio->dims[0]; x->section.sy = xio->dims[1]; getHeader(x->iodesc,&(x->hdr)); if (hstio_err()) return -1; getShortData(x->iodesc,&(x->data)); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } int putShortHD(char *fname, char *ename, int ever, ShortHdrData *x, int option) { if (option == 0) x->iodesc = openOutputImage(fname, ename, ever, &(x->hdr), x->data.tot_nx, x->data.tot_ny, FITSSHORT); else if (option & Overwrite) 
x->iodesc = openUpdateImage(fname, ename, ever, &(x->hdr)); if (hstio_err()) return -1; putShortData(x->iodesc,&(x->data)); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } /* Routine to support the routines which write out a subsection of data in * * memory to output files. XBEG and YBEG are zero-indexed values. The * * coordinate values in the headers are one-indexed. */ void updateWCS (Hdr *hdr, int xbeg, int ybeg) { FitsKw kw; float old_LTV, new_LTV; double old_CRPIX, new_CRPIX; kw = findKw(hdr,"LTV1"); if (kw != 0) { old_LTV = getFloatKw (kw); new_LTV = old_LTV - (float)xbeg; putFloatKw (kw, new_LTV); } kw = findKw(hdr,"LTV2"); if (kw != 0) { old_LTV = getFloatKw (kw); new_LTV = old_LTV - (float)ybeg; putFloatKw (kw, new_LTV); } kw = findKw(hdr,"CRPIX1"); if (kw != 0) { old_CRPIX = getDoubleKw (kw); new_CRPIX = old_CRPIX - (double)xbeg; putDoubleKw (kw, new_CRPIX); } kw = findKw(hdr,"CRPIX2"); if (kw != 0) { old_CRPIX = getDoubleKw (kw); new_CRPIX = old_CRPIX - (double)ybeg; putDoubleKw (kw, new_CRPIX); } } int putFloatHDSect(char *fname, char *ename, int ever, FloatHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { /* Update the LTV keywords */ updateWCS (&(x->hdr), xbeg, ybeg); if (option == 0) x->iodesc = openOutputImage(fname, ename, ever, &(x->hdr), xsize, ysize, FITSFLOAT); else if (option & Overwrite) x->iodesc = openUpdateImage(fname, ename, ever, &(x->hdr)); if (hstio_err()) return -1; putFloatSect(x->iodesc,&(x->data),xbeg,ybeg,xsize,ysize); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } int putShortHDSect(char *fname, char *ename, int ever, ShortHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { /* Update the LTV keywords */ updateWCS (&(x->hdr), xbeg, ybeg); if (option == 0) x->iodesc = openOutputImage(fname, ename, ever, &(x->hdr), xsize, ysize, FITSSHORT); else if (option & Overwrite) x->iodesc = openUpdateImage(fname, ename, ever, &(x->hdr)); if 
(hstio_err()) return -1; putShortSect(x->iodesc,&(x->data),xbeg,ybeg,xsize,ysize); if (hstio_err()) return -1; closeImage(x->iodesc); clear_err(); return 0; } int getFloatHdr (char *fname, char *ename, int ever, FloatHdrLine *x) { IODesc *xio; FitsKw kw; int dim1, dim2, no_dims; int status = 0; x->iodesc = openInputImage (fname,ename,ever); if (hstio_err()) return (-1); getHeader (x->iodesc,&(x->hdr)); /* determine dimensions for images which contain a constant value */ xio = (IODesc *)(x->iodesc); if (fits_get_img_dim(xio->ff, &no_dims, &status)) { ioerr(BADDIMS, xio, status); return -1; } if (no_dims == 0) { kw = findKw(xio->hdr,"NPIX1"); if (kw == 0) { ioerr(BADDIMS,xio,0); return -1; } dim1 = getIntKw(kw); kw = findKw(xio->hdr,"NPIX2"); if (kw == 0) { ioerr(BADDIMS,xio,0); return -1; } dim2 = getIntKw(kw); xio->dims[0] = dim1; xio->dims[1] = dim2; } if (hstio_err()) return (-1); clear_err(); return (0); } int getShortHdr (char *fname, char *ename, int ever, ShortHdrLine *x) { IODesc *xio; FitsKw kw; int dim1, dim2, no_dims; int status = 0; x->iodesc = openInputImage (fname,ename,ever); if (hstio_err()) return (-1); getHeader (x->iodesc,&(x->hdr)); /* determine dimensions for images which contain a constant value */ xio = (IODesc *)(x->iodesc); if (fits_get_img_dim(xio->ff, &no_dims, &status)) { ioerr(BADDIMS, xio, status); return -1; } if (no_dims == 0) { kw = findKw(xio->hdr,"NPIX1"); if (kw == 0) { ioerr(BADDIMS,xio,0); return -1; } dim1 = getIntKw(kw); kw = findKw(xio->hdr,"NPIX2"); if (kw == 0) { ioerr(BADDIMS,xio,0); return -1; } dim2 = getIntKw(kw); xio->dims[0] = dim1; xio->dims[1] = dim2; } if (hstio_err()) return (-1); clear_err(); return (0); } int getSci(char *fname, int ever, SciHdrData *x) { return getFloatHD(fname,"SCI",ever,x); } int putSci(char *fname, int ever, SciHdrData *x, int option) { return putFloatHD(fname,"SCI",ever,x,option); } int getErr(char *fname, int ever, ErrHdrData *x) { return getFloatHD(fname,"ERR",ever,x); } int putErr(char 
*fname, int ever, ErrHdrData *x, int option) { return putFloatHD(fname,"ERR",ever,x,option); } int getDQ(char *fname, int ever, DQHdrData *x) { return getShortHD(fname,"DQ",ever,x); } int putDQ(char *fname, int ever, DQHdrData *x, int option) { return putShortHD(fname,"DQ",ever,x,option); } int getSmpl(char *fname, int ever, SmplHdrData *x) { return getShortHD(fname,"SAMP",ever,x); } int putSmpl(char *fname, int ever, SmplHdrData *x, int option) { return putShortHD(fname,"SAMP",ever,x,option); } int getIntg(char *fname, int ever, IntgHdrData *x) { return getFloatHD(fname,"TIME",ever,x); } int putIntg(char *fname, int ever, IntgHdrData *x, int option) { return putFloatHD(fname,"TIME",ever,x,option); } /* ** ** Routines to output a subsection of an image in memory to a disk file ** ** where the subsection is the full size (NAXIS1/NAXIS2) of the output ** ** image. ** ** */ int putSciSect(char *fname, int ever, SciHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { return (putFloatHDSect(fname,"SCI",ever,x,xbeg,ybeg,xsize,ysize,option)); } int putErrSect(char *fname, int ever, ErrHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { return (putFloatHDSect(fname,"ERR",ever,x,xbeg,ybeg,xsize,ysize,option)); } int putDQSect(char *fname, int ever, DQHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { return (putShortHDSect(fname,"DQ",ever,x,xbeg,ybeg,xsize,ysize,option)); } int putSmplSect(char *fname, int ever, SmplHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { return (putShortHDSect(fname,"SAMP",ever,x,xbeg,ybeg,xsize,ysize,option)); } int putIntgSect(char *fname, int ever, IntgHdrData *x, int xbeg, int ybeg, int xsize, int ysize, int option) { return (putFloatHDSect(fname,"TIME",ever,x,xbeg,ybeg,xsize,ysize,option)); } /* Get just the header for the extension */ int getSciHdr (char *fname, int ever, SciHdrLine *x) { return getFloatHdr (fname,"SCI",ever,x); } int getErrHdr (char *fname, int ever, 
ErrHdrLine *x) { return getFloatHdr (fname,"ERR",ever,x); } int getDQHdr(char *fname, int ever, DQHdrLine *x) { return getShortHdr (fname,"DQ",ever,x); } /* Get just the data line for the extension */ int getSciLine (SciHdrLine *x, int line_num) { return (getFloatLine (x->iodesc, line_num, x->line)); } int getErrLine (ErrHdrLine *x, int line_num) { return (getFloatLine (x->iodesc, line_num, x->line)); } int getDQLine (DQHdrLine *x, int line_num) { return (getShortLine (x->iodesc, line_num, x->line)); } int getSingleGroup(char *fname, int ever, SingleGroup *x) { IODescPtr in; in = openInputImage(fname,"",0); if (hstio_err()) return -1; if (x->globalhdr != NULL) free(x->globalhdr); if (x->filename != NULL) free(x->filename); x->filename = (char *)calloc((strlen(fname) + 1),sizeof(char)); strcpy(x->filename,fname); x->globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->globalhdr == NULL) return -1; initHdr(x->globalhdr); getHeader(in,x->globalhdr); if (hstio_err()) return -1; closeImage(in); x->group_num = ever; getSci(fname,ever,&(x->sci)); if (hstio_err()) return -1; getErr(fname,ever,&(x->err)); if (hstio_err()) return -1; getDQ(fname,ever,&(x->dq)); if (hstio_err()) return -1; clear_err(); return 0; } int getSingleGroupLine (char *fname, int line, SingleGroupLine *x) { x->line_num = line; getSciLine(&(x->sci), line); if (hstio_err()) return (-1); getErrLine(&(x->err), line); if (hstio_err()) return (-1); getDQLine(&(x->dq), line); if (hstio_err()) return (-1); clear_err(); return (0); } int putSingleGroupHdr(char *fname, SingleGroup *x, int option) { IODescPtr out = NULL; if (option == 0) out = openOutputImage(fname,"",0,x->globalhdr,0,0,FITSBYTE); else if (option & Overwrite) out = openUpdateImage(fname,"",0,x->globalhdr); if (hstio_err()) return -1; closeImage(out); clear_err(); return 0; } int putSingleGroup(char *fname, int ever, SingleGroup *x, int option) { struct stat buf; if (option == 0) { if (stat(fname,&buf) == -1) putSingleGroupHdr(fname,x,0); } 
putSci(fname,ever,&(x->sci),option); if (hstio_err()) return -1; putErr(fname,ever,&(x->err),option); if (hstio_err()) return -1; putDQ(fname,ever,&(x->dq),option); if (hstio_err()) return -1; clear_err(); return 0; } /* ** ** Routine to output a subsection of an imset in memory to a disk file where ** ** the subsection is the full size (NAXIS1/NAXIS2) of the output image. ** ** */ int putSingleGroupSect(char *fname, int ever, SingleGroup *x, int xbeg, int ybeg, int xsize, int ysize, int option) { struct stat buf; if (option == 0) { if (stat(fname,&buf) == -1) putSingleGroupHdr(fname,x,0); } putSciSect(fname,ever,&(x->sci),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putErrSect(fname,ever,&(x->err),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putDQSect (fname,ever,&(x->dq),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; clear_err (); return 0; } int getSingleNicmosGroup(char *fname, int ever, SingleNicmosGroup *x) { IODescPtr in; in = openInputImage(fname,"",0); if (hstio_err()) return -1; if (x->globalhdr != NULL) free(x->globalhdr); if (x->filename != NULL) free(x->filename); x->filename = (char *)calloc((strlen(fname) + 1),sizeof(char)); strcpy(x->filename,fname); x->globalhdr = (Hdr *)calloc(1,sizeof(Hdr)); if (x->globalhdr == NULL) return -1; initHdr(x->globalhdr); getHeader(in,x->globalhdr); if (hstio_err()) return -1; closeImage(in); x->group_num = ever; getSci(fname,ever,&(x->sci)); if (hstio_err()) return -1; getErr(fname,ever,&(x->err)); if (hstio_err()) return -1; getDQ(fname,ever,&(x->dq)); if (hstio_err()) return -1; getSmpl(fname,ever,&(x->smpl)); if (hstio_err()) return -1; getIntg(fname,ever,&(x->intg)); if (hstio_err()) return -1; clear_err(); return 0; } int putSingleNicmosGroupHdr(char *fname, SingleNicmosGroup *x, int option) { IODescPtr out = NULL; if (option == 0) out = openOutputImage(fname,"",0,x->globalhdr,0,0,FITSBYTE); else if (option & Overwrite) out = openUpdateImage(fname,"",0,x->globalhdr); if 
(hstio_err()) return -1; closeImage(out); clear_err(); return 0; } int putSingleNicmosGroup(char *fname, int ever, SingleNicmosGroup *x, int option) { struct stat buf; if (option == 0) { if (stat(fname,&buf) == -1) putSingleNicmosGroupHdr(fname,x,0); } putSci(fname,ever,&(x->sci),option); if (hstio_err()) return -1; putErr(fname,ever,&(x->err),option); if (hstio_err()) return -1; putDQ(fname,ever,&(x->dq),option); if (hstio_err()) return -1; putSmpl(fname,ever,&(x->smpl),option); if (hstio_err()) return -1; putIntg(fname,ever,&(x->intg),option); if (hstio_err()) return -1; clear_err(); return 0; } /* ** ** Routine to output a subsection of an imset in memory to a disk file where ** ** the subsection is the full size (NAXIS1/NAXIS2) of the output image. ** ** */ int putSingleNicmosGroupSect(char *fname, int ever, SingleNicmosGroup *x, int xbeg, int ybeg, int xsize, int ysize, int option) { struct stat buf; if (option == 0) { if (stat(fname,&buf) == -1) putSingleNicmosGroupHdr(fname,x,0); } putSciSect(fname,ever,&(x->sci),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putErrSect(fname,ever,&(x->err),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putDQSect (fname,ever,&(x->dq),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putSmplSect(fname,ever,&(x->smpl),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; putIntgSect(fname,ever,&(x->intg),xbeg,ybeg,xsize,ysize,option); if (hstio_err()) return -1; clear_err (); return 0; } int getMultiGroupHdr(char *fname, MultiGroup *x) { IODescPtr in; int i; in = openInputImage(fname,"",0); if (hstio_err()) return -1; getHeader(in,x->group[0].globalhdr); if (hstio_err()) return -1; closeImage(in); if (x->group[0].filename != NULL) free(x->group[0].filename); x->group[0].filename = (char *)calloc((strlen(fname) + 1),sizeof(char)); strcpy(x->group[0].filename,fname); for (i = 1; i < x->ngroups; ++i) { x->group[i].filename = x->group[0].filename; x->group[i].globalhdr = x->group[0].globalhdr; 
} clear_err(); return 0; } int getMultiGroup(MultiGroup *x, int ng, int ever) { if (ng < 0 || ng > x->ngroups) { error(BADGROUP,""); return -1; } x->group[ng].group_num = ever; getSci(x->group[ng].filename,ever,&(x->group[ng].sci)); if (hstio_err()) return -1; getErr(x->group[ng].filename,ever,&(x->group[ng].err)); if (hstio_err()) return -1; getDQ(x->group[ng].filename,ever,&(x->group[ng].dq)); if (hstio_err()) return -1; clear_err(); return 0; } int putMultiGroupHdr(char *fname, MultiGroup *x, int option) { IODescPtr out = NULL; if (option == 0) out = openOutputImage(fname,"",0,x->group[0].globalhdr,0,0,FITSBYTE); else if (option & Overwrite) out = openUpdateImage(fname,"",0,x->group[0].globalhdr); if (hstio_err()) return -1; putHeader(out); if (hstio_err()) return -1; closeImage(out); clear_err(); return 0; } int putMultiGroup(char *fname, int ever, MultiGroup *x, int ng, int option) { struct stat buf; if (ng < 0 || ng > x->ngroups) { error(BADGROUP,""); return -1; } if (option == 0) { if (stat(fname,&buf) == -1) putMultiGroupHdr(fname,x,0); } putSci(fname,ever,&(x->group[ng].sci),option); if (hstio_err()) return -1; putErr(fname,ever,&(x->group[ng].err),option); if (hstio_err()) return -1; putDQ(fname,ever,&(x->group[ng].dq),option); if (hstio_err()) return -1; clear_err(); return 0; } int getMultiNicmosGroupHdr(char *fname, MultiNicmosGroup *x) { IODescPtr in; int i; in = openInputImage(fname,"",0); if (hstio_err()) return -1; getHeader(in,x->group[0].globalhdr); if (hstio_err()) return -1; closeImage(in); if (x->group[0].filename != NULL) free(x->group[0].filename); x->group[0].filename = (char *)calloc((strlen(fname) + 1),sizeof(char)); strcpy(x->group[0].filename,fname); for (i = 1; i < x->ngroups; ++i) { x->group[i].filename = x->group[0].filename; x->group[i].globalhdr = x->group[0].globalhdr; } clear_err(); return 0; } int getMultiNicmosGroup(MultiNicmosGroup *x, int ng, int ever) { if (ng < 0 || ng > x->ngroups) { error(BADGROUP,""); return -1; } 
x->group[ng].group_num = ever; getSci(x->group[ng].filename,ever,&(x->group[ng].sci)); if (hstio_err()) return -1; getErr(x->group[ng].filename,ever,&(x->group[ng].err)); if (hstio_err()) return -1; getDQ(x->group[ng].filename,ever,&(x->group[ng].dq)); if (hstio_err()) return -1; getSmpl(x->group[ng].filename,ever,&(x->group[ng].smpl)); if (hstio_err()) return -1; getIntg(x->group[ng].filename,ever,&(x->group[ng].intg)); if (hstio_err()) return -1; clear_err(); return 0; } int putMultiNicmosGroupHdr(char *fname, MultiNicmosGroup *x, int option) { IODescPtr out = NULL; if (option == 0) out = openOutputImage(fname,"",0,x->group[0].globalhdr,0,0,FITSBYTE); else if (option & Overwrite) out = openUpdateImage(fname,"",0,x->group[0].globalhdr); if (hstio_err()) return -1; putHeader(out); if (hstio_err()) return -1; closeImage(out); clear_err(); return 0; } int putMultiNicmosGroup(char *fname, int ever, MultiNicmosGroup *x, int ng, int option) { struct stat buf; if (ng < 0 || ng > x->ngroups) { error(BADGROUP,""); return -1; } if (option == 0) { if (stat(fname,&buf) == -1) putMultiNicmosGroupHdr(fname,x,0); } putSci(fname,ever,&(x->group[ng].sci),option); if (hstio_err()) return -1; putErr(fname,ever,&(x->group[ng].err),option); if (hstio_err()) return -1; putDQ(fname,ever,&(x->group[ng].dq),option); if (hstio_err()) return -1; putSmpl(fname,ever,&(x->group[ng].smpl),option); if (hstio_err()) return -1; putIntg(fname,ever,&(x->group[ng].intg),option); if (hstio_err()) return -1; clear_err(); return 0; } /* ** Section 6. ** Functions to manipulate the header array. ** ** See the file keyword.c */ /* ** Section 7. 
** ** High-level functions formerly in hstioirf.c */ /* CFITSIO TODO: store axes in IODesc object as array to more conveniently interface with CFITSIO */ static void detect_iraferr(void) { sprintf(error_msg,"\nIRAF error %d: %s\n",c_iraferr(), c_iraferrmsg()); } /* ** Make_iodesc takes a filename, extname, and extver and creates and ** initializes an IODesc structure. In the process, it builds a ** correct filename to be used in the open statement to IRAF. This ** constructed filename is returned. */ static char *make_iodesc(IODesc **x, char *fname, char *ename, int ever) { int i, n, flen; char *tmp; IODesc *iodesc; char xname[9]; iodesc = (IODesc *)calloc(1,sizeof(IODesc)); if (iodesc == NULL) { error(NOMEM,"Allocating I/O descriptor"); return NULL; } iodesc->ff = NULL; iodesc->filename = NULL; iodesc->extname = NULL; iodesc->extver = 0; iodesc->hflag = 0; iodesc->hdr = NULL; iodesc->dims[0] = 0; iodesc->dims[1] = 0; iodesc->type = 0; if (fname == 0) fname = ""; if (ename == 0) ename = ""; iodesc->filename = (char *)calloc(((flen = strlen(fname)) + 1), sizeof(char)); if (iodesc->filename == NULL) { free(iodesc); error(NOMEM,"Allocating I/O descriptor"); return NULL; } n = strlen(ename); if (n > 8) { ioerr(BADEXTNAME,iodesc,0); return NULL; } for (i = 0; i < n; ++i) xname[i] = toupper(ename[i]); for (--i; i >= 0 && xname[i] == ' '; --i) ; ++i; xname[i] = '\0'; iodesc->extname = (char *)calloc((strlen(xname) + 1),sizeof(char)); if (iodesc->extname == NULL) { free(iodesc->filename); free(iodesc); error(NOMEM,"Allocating I/O descriptor"); return NULL; } strcpy(iodesc->filename,fname); strcpy(iodesc->extname,xname); iodesc->extver = ever; /* make up the proper filename */ /* check for a request for the primary HDU */ tmp = (char *)calloc((flen + 80),sizeof(char)); if (tmp == NULL) { error(NOMEM,"Allocating I/O descriptor"); return NULL; } strcpy(tmp,fname); if (ever == 0 || ename == 0 || ename[0] == '\0' || ename[0] == ' ') strcat(tmp,"[0]"); else 
sprintf(&tmp[flen],"[%s,%d]",xname,ever); *x = iodesc; return tmp; } IODescPtr openInputImage(char *fname, char *ename, int ever) { IODesc *iodesc; int no_dims; char *tmp; char ospath[SZ_PATHNAME]; int open_mode; int status = 0; /* CFITSIO: Error handling */ c_pusherr(detect_iraferr); tmp = make_iodesc(&iodesc, fname, ename, ever); if (tmp == NULL) return NULL; iodesc->options = ReadOnly; /* CFITSIO: Resolve this inheritance stuff */ /* p = strstr(tmp,"[0]"); */ /* if (p == NULL) { */ /* tmp[strlen(tmp) - 1] = '\0'; */ /* strcat(tmp, ",NOINHERIT]"); */ /* } */ /* open the file using CFITSIO */ if (c_vfn2osfn(fname, ospath)) { free(tmp); return NULL; } open_mode = READONLY; if (c_vfn2osfn(tmp, ospath)) { free(tmp); return NULL; } free(tmp); if (fits_open_file(&iodesc->ff, ospath, open_mode, &status)) { ioerr(BADOPEN, iodesc, status); free(iodesc->extname); free(iodesc->filename); free(iodesc); return NULL; } /* get the dimensions and type */ fits_get_img_dim(iodesc->ff, &no_dims, &status); fits_get_img_equivtype(iodesc->ff, &iodesc->type, &status); fits_get_img_size(iodesc->ff, 2, iodesc->dims, &status); if (status) { ioerr(BADDIMS, iodesc, status); return NULL; } if (no_dims == 2) { /* Nothing */ } else if (no_dims == 1) { iodesc->dims[1] = 0; } else if (no_dims == 0) { iodesc->dims[0] = 0; iodesc->dims[1] = 0; } else { ioerr(BADDIMS, iodesc, 0); return NULL; } clear_err(); return iodesc; } IODescPtr openOutputImage(char *fname, char *ename, int ever, Hdr *hd, int d1, int d2, FitsDataType typ) { IODesc *iodesc; char *tmp; char date[12]; char date_card[81]; time_t t; struct tm *time_tmp; FitsKw kw; char ename_val[9]; int ever_val; char ospath[SZ_PATHNAME]; int status = 0; /* CFITSIO: Error handling */ c_pusherr(detect_iraferr); tmp = make_iodesc(&iodesc, fname, ename, ever); if (tmp == NULL) return NULL; iodesc->options = WriteOnly; if (ever == 0 || ename == 0 || ename[0] == '\0' || ename[0] == ' ') { int rtn = ckNewFile(fname); if (rtn == 1) { ioerr(BADEXIST, 
iodesc, 0); return NULL; } else if (rtn == 2) { ioerr(BADREMOVE, iodesc, 0); return NULL; } } /* CFITSIO: Check this INHERIT, APPEND nonsense works */ /* p = strstr(tmp, "[0]"); */ /* if (p == NULL) { */ /* tmp[strlen(tmp) - 1] = '\0'; */ /* strcat(tmp, ",INHERIT,APPEND]"); */ /* } */ /* else { */ /* tmp[strlen(tmp) - 3] = '\0'; /\* eliminate the "[0]" *\/ */ /* } */ /* make sure ename and ever are in the header array */ kw = findKw(hd, "EXTNAME"); if (kw == NotFound) { if (ever != 0 && ename != 0 && ename[0] != '\0' && ename[0] != ' ') { kw = insertfirst(hd); kw = insertStringKw(kw, "EXTNAME", ename, "Name of the extension"); } } else { /* Make sure it has the right value */ getStringKw(kw,ename_val,8); if (strncpy(ename_val, ename, strlen(ename)) != 0) putStringKw(kw,ename); } kw = findKw(hd,"EXTVER"); if (kw == NotFound) { if (ever != 0 && ename != 0 && ename[0] != '\0' && ename[0] != ' ') { kw = findKw(hd, "EXTNAME"); kw = insertIntKw(kw, "EXTVER", ever, "Extension version"); } } else { /* Make sure it has the right value */ ever_val = getIntKw(kw); if (ever != ever_val) putIntKw(kw, ever); } /* open or create the file using CFITSIO */ if (ever == 0 || ename == 0 || ename[0] == '\0' || ename[0] == ' ') { c_vfn2osfn(fname, ospath); fits_create_file(&iodesc->ff, ospath, &status); } else { c_vfn2osfn(fname, ospath); fits_open_file(&iodesc->ff, ospath, READWRITE, &status); } if (status) { ioerr(BADOPEN, iodesc, status); free(iodesc->extname); free(iodesc->filename); free(iodesc); return NULL; } free(tmp); iodesc->dims[0] = d1; iodesc->dims[1] = d2; /* IMIO would always set bitpix to 16 when the dimensions are naught */ if (d1 == 0 && d2 == 0) { iodesc->type = SHORT_IMG; } else { switch (typ) { case FITSBYTE: iodesc->type = BYTE_IMG; break; case FITSSHORT: iodesc->type = SHORT_IMG; break; case FITSLONG: iodesc->type = LONG_IMG; break; case FITSFLOAT: iodesc->type = FLOAT_IMG; break; case FITSDOUBLE: iodesc->type = DOUBLE_IMG; break; default: iodesc->type = 
SHORT_IMG; break; } } iodesc->hdr = hd; if (fits_create_img(iodesc->ff, iodesc->type, 2, iodesc->dims, &status)) { ioerr(BADOPEN, iodesc, status); return NULL; } if (fits_write_record(iodesc->ff, "ORIGIN = 'HSTIO/CFITSIO March 2010' / FITS file originator", &status)) { ioerr(BADWRITE, iodesc, status); return NULL; } t = time(NULL); time_tmp = localtime(&t); strftime(date, 12, "%Y-%m-%d", time_tmp); snprintf(date_card, 80, "DATE = '%s' / date this file was written (yyyy-mm-dd)", date); if (fits_write_record(iodesc->ff, date_card, &status)) { ioerr(BADWRITE, iodesc, status); return NULL; } iodesc->hflag = 1; /* mark to write header */ if (iodesc->dims[0] == 0) { putHeader(iodesc); iodesc->hflag = 0; } clear_err(); return iodesc; } IODescPtr openUpdateImage(char *fname, char *ename, int ever, Hdr *hd) { IODesc *iodesc; int no_dims; char *tmp; char ospath[SZ_PATHNAME]; int status = 0; /* CFITSIO: Error handling */ c_pusherr(detect_iraferr); tmp = make_iodesc(&iodesc, fname, ename, ever); if (tmp == NULL) return NULL; iodesc->options = ReadWrite; /* CFITSIO: Resolve this keyword inheritance stuff */ /* p = strstr(tmp,"[0]"); */ /* if (p == NULL) { */ /* tmp[strlen(tmp) - 1] = '\0'; */ /* strcat(tmp,",NOINHERIT]"); */ /* } */ /* open the file using CFITSIO */ c_vfn2osfn(tmp, ospath); if (fits_open_file(&iodesc->ff, ospath, READWRITE, &status)) { ioerr(BADOPEN, iodesc, status); free(tmp); free(iodesc->extname); free(iodesc->filename); free(iodesc); return NULL; } free(tmp); /* get the dimensions and type */ fits_get_img_dim(iodesc->ff, &no_dims, &status); fits_get_img_equivtype(iodesc->ff, &iodesc->type, &status); fits_get_img_size(iodesc->ff, 2, iodesc->dims, &status); if (status) { ioerr(BADDIMS, iodesc, status); return NULL; } if (no_dims == 2) { /* Nothing */ } else if (no_dims == 1) { iodesc->dims[1] = 0; } else if (no_dims == 0) { iodesc->dims[0] = 0; iodesc->dims[1] = 0; } else { ioerr(BADDIMS, iodesc, 0); return NULL; } /* read the user area into the header array 
*/ getHeader(iodesc, hd); clear_err(); return iodesc; } void closeImage(IODescPtr iodesc_) { IODesc *iodesc = (IODesc *)iodesc_; int status = 0; if (iodesc->options != ReadOnly && iodesc->dims[0] != 0) putHeader(iodesc); if (fits_close_file(iodesc->ff, &status)) { /* TODO: Raise error */ } /* This is a handy check to use pyfits to validate the file upon every close */ /* c_vfn2osfn(iodesc->filename, ospath); */ /* sprintf(system_string, "python -c \"import pyfits; pyfits.open('%s')\"", ospath); */ /* if (system(system_string)) { */ /* printf("LOG: pyfits corruption!!!\n"); */ /* exit(1); */ /* } */ /* if (there is an IRAF error) */ /* ioerr(IRAF_CLOSE,iodesc); */ free(iodesc->extname); free(iodesc->filename); free(iodesc); /* CFITSIO: Error handling */ c_poperr(); } /* According to the imio documentation, the following reserved keywords are recognized: SIMPLE BITPIX DATATYPE NAXIS* GROUPS GCOUNT PCOUNT PSIZE PTYPE* PDTYPE* PSIZE* XTENSION */ static char* reservedKwds[] = { "BITPIX ", "BSCALE ", "BZERO ", "DATAMAX ", "DATAMIN ", "DATATYPE", "DATE ", "EXTEND ", "GCOUNT ", "GROUPS ", "NAXIS ", "NAXIS* ", "ORIGIN ", "PCOUNT ", "PDTYPE* ", "PSIZE ", "PSIZE* ", "PTYPE* ", "SIMPLE ", "XTENSION", NULL }; /* Whole cards to not allow in the user area. Must remain alphabetized. */ static char* reservedCards[] = { "COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy", "COMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H ", NULL }; int isReservedKwd(const char* card) { /* CFITSIO: Should this be made case-insensitive? */ /* TODO: Maybe use a binary search? 
*/ /* Returns 1 if the card matches one of the reserved keywords */ int i; int match; char** kwd = reservedKwds; int cmp; for (kwd = reservedCards; *kwd != NULL; ++kwd) { cmp = strncmp(*kwd, card, 79); if (cmp == 0) { return 1; } } for (kwd = reservedKwds; *kwd != NULL; ++kwd) { /* Short-circuit if we're certain not to find the kwd later in the list */ if ((*kwd)[0] > card[0]) { return 0; } match = 1; for (i = 0; i < 8; ++i) { /* '*' indicates a digit */ if ((*kwd)[i] == '*') { if (card[i] < '0' || card[i] > '9') { match = 0; break; } } else if ((*kwd)[i] != card[i]) { match = 0; break; } } if (match) { return 1; } } return 0; } int getHeader(IODescPtr iodesc_, Hdr *hd) { IODesc *iodesc = (IODesc *)iodesc_; int ncards, i, j; char source[HDRSize]; char *target; int status = 0; if (iodesc->options == WriteOnly) { ioerr(NOGET, iodesc, 0); return -1; } /* get the number of cards in the header */ if (fits_get_hdrspace(iodesc->ff, &ncards, NULL, &status)) { ioerr(BADHSIZE, iodesc, status); return -1; } /* allocate space for the header cards */ if (allocHdr(hd, ncards, True) == -1) return -1; /* translate the data */ hd->nlines = 0; for (i = 0; i < ncards; ++i) { if (fits_read_record(iodesc->ff, i+1, source, &status)) { ioerr(BADREAD, iodesc, status); return -1; } if (!isReservedKwd(source)) { target = hd->array[hd->nlines]; for (j = 0; j < (HDRSize -1); ++j) { *target++ = source[j]; } *target++ = '\0'; hd->nlines++; } } iodesc->hdr = hd; clear_err(); return 0; } int putHeader(IODescPtr iodesc_) { IODesc *iodesc = (IODesc *)iodesc_; int i, j, tmp; int numkeys; int found_non_space; char *source; char card[81]; int status = 0; if (iodesc->options == ReadOnly) { ioerr(NOPUT, iodesc, status); return -1; } if (iodesc->hflag) { /* CFITSIO: We probably need to move this in front of all calls to fits_create_img */ /* If the image is actually 1-dimensional, modify the naxis2 * value so the output header is written with only NAXIS and * NAXIS1 keywords, where NAXIS=1, and 
NAXIS1=number. */ if (iodesc->dims[0] != 0 && iodesc->dims[1] == 1) iodesc->dims[1] = 0; /* set the pixel type */ fits_update_key(iodesc->ff, TINT, "BITPIX", &(iodesc->type), NULL, &status); if (status) { ioerr(BADWRITE, iodesc, status); return -1; } if (iodesc->dims[0] == 0 && iodesc->dims[1] == 0) { tmp = 0; fits_update_key(iodesc->ff, TINT, "NAXIS", &tmp, NULL, &status); if (status) { ioerr(BADWRITE, iodesc, status); return -1; } fits_delete_key(iodesc->ff, "NAXIS1", &status); if (status == KEY_NO_EXIST) { fits_clear_errmsg(); status = 0; } fits_delete_key(iodesc->ff, "NAXIS2", &status); if (status == KEY_NO_EXIST) { fits_clear_errmsg(); status = 0; } } else if (iodesc->dims[0] != 0 && iodesc->dims[1] == 0) { /* set the number of dimensions */ tmp = 1; fits_update_key(iodesc->ff, TINT, "NAXIS", &tmp, NULL, &status); if (status) { ioerr(BADWRITE, iodesc, status); return -1; } /* set dim1 */ fits_update_key(iodesc->ff, TINT, "NAXIS1", &iodesc->dims[0], NULL, &status); if (status) { ioerr(BADWRITE, iodesc, status); return -1; } fits_delete_key(iodesc->ff, "NAXIS2", &status); if (status == KEY_NO_EXIST) { fits_clear_errmsg(); status = 0; } } else { /* set the number of dimensions */ tmp = 2; fits_update_key(iodesc->ff, TINT, "NAXIS", &tmp, NULL, &status); /* set dim1 and dim2 */ fits_update_key(iodesc->ff, TINT, "NAXIS1", &iodesc->dims[0], NULL, &status); fits_update_key(iodesc->ff, TINT, "NAXIS2", &iodesc->dims[1], NULL, &status); } if (status) { ioerr(BADWRITE, iodesc, status); } } /* Verify the size of the user area */ /* The original code just memcopies the cards into the "user area" of the header. CFITSIO doesn't have the concept of a "user area", so we need to carefully only copy the cards that are not "reserved". 
*/ if (fits_get_hdrspace(iodesc->ff, &numkeys, NULL, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } for (i = 0, j = numkeys; i < numkeys; ++i, --j) { if (fits_read_record(iodesc->ff, j, card, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } if (!isReservedKwd(card)) { if (fits_delete_record(iodesc->ff, j, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } else { ++j; } } /* translate the data */ /* Skip blank cards at the beginning */ found_non_space = 0; for (i = 0; i < iodesc->hdr->nlines; ++i) { source = iodesc->hdr->array[i]; for (j = 0; j < 80; ++j) { if (source[j] != ' ' && source[j] != '\n' && source[j] != 0) { found_non_space = 1; break; } } if (found_non_space) { break; } } for (/* i from above */; i < iodesc->hdr->nlines; ++i) { source = iodesc->hdr->array[i]; if (!isReservedKwd(source)) { if (fits_write_record(iodesc->ff, source, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } } /* If we don't explicitly set BSCALE and BZERO to 1.0 and 0.0 here, their values could be inadvertently brought over from the source image. This was the source of a very hard-to-find bug. */ if (iodesc->type == TFLOAT || iodesc->type == TDOUBLE) { fits_set_bscale(iodesc->ff, 1.0, 0.0, &status); } clear_err(); return 0; } int getFloatData(IODescPtr iodesc_, FloatTwoDArray *da) { IODesc *iodesc = (IODesc *)iodesc_; int no_dims, i, j; long fpixel[2]; int anynul; int type; FitsKw kw; float val; int status = 0; if (iodesc->options == WriteOnly) { ioerr(NOGET,iodesc, 0); return -1; } fits_get_img_dim(iodesc->ff, &no_dims, &status); fits_get_img_size(iodesc->ff, 2, iodesc->dims, &status); if (status) { ioerr(BADDIMS, iodesc, status); return -1; } /* If the number of dimensions of the image is zero, need to determine how many dimensions the image is supposed to have according to the NPIX[1/2] keyword(s). 
*/ if (no_dims == 0) { kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw == 0) { ioerr(BADSCIDIMS,iodesc, 0); return -1; } val = getFloatKw(kw); kw = findKw(iodesc->hdr,"NPIX1"); if (kw == 0) { ioerr(BADSCIDIMS,iodesc, 0); return -1; } iodesc->dims[0] = getIntKw(kw); /* If NPIX2 is not found, the image should be 1D; dim2 = 1 and * * not 0 for purposes of memory allocation. */ kw = findKw(iodesc->hdr,"NPIX2"); if (kw == 0) { iodesc->dims[1] = 1; } else { iodesc->dims[1] = getIntKw(kw); } if (allocFloatData(da, iodesc->dims[0], iodesc->dims[1], False)) return -1; for (j = 0; j < iodesc->dims[1]; ++j) { for (i = 0; i < iodesc->dims[0]; ++i) { PPix(da, i, j) = val; } } } else if (no_dims == 1) { iodesc->dims[1] = 1; fits_get_img_equivtype(iodesc->ff, &type, &status); /* CFITSIO TODO: Should we verify the type is correct here? Original code gets type, but then does nothing with it. */ if (allocFloatData(da, iodesc->dims[0], iodesc->dims[1], True)) return -1; fpixel[0] = 1; fpixel[1] = 1; if (fits_read_pix(iodesc->ff, TFLOAT, fpixel, iodesc->dims[0], 0, (float *)&(PPix(da, 0, 0)), &anynul, &status)) { ioerr(BADREAD, iodesc, status); return -1; } } else if (no_dims == 2) { fits_get_img_equivtype(iodesc->ff, &type, &status); /* CFITSIO TODO: Should we verify the type is correct here? Original code gets type, but then does nothing with it. 
*/ if (allocFloatData(da, iodesc->dims[0], iodesc->dims[1], True)) return -1; fpixel[0] = 1; if (da->storageOrder == ROWMAJOR) { for (i = 0; i < iodesc->dims[1]; ++i) { fpixel[1] = i + 1; if (fits_read_pix(iodesc->ff, TFLOAT, fpixel, iodesc->dims[0], 0, &(PPix(da, 0, i)), &anynul, &status)) { ioerr(BADREAD,iodesc, status); return -1; } } } else { unsigned nColumns = iodesc->dims[0]; float * row = malloc(nColumns*sizeof(float)); if (!row) return OUT_OF_MEMORY; for (i = 0; i < iodesc->dims[1]; ++i) { fpixel[1] = i + 1; if (fits_read_pix(iodesc->ff, TFLOAT, fpixel, nColumns, 0, row, &anynul, &status)) { ioerr(BADREAD,iodesc, status); return -1; } {unsigned j; for (j = 0; j < nColumns; ++j) PPixColumnMajor(da, i, j) = row[j]; } } if (row) free(row); } } else { ioerr(BADDIMS,iodesc,0); return -1; } clear_err(); return 0; } int putFloatData(IODescPtr iodesc_, FloatTwoDArray *da) { IODesc *iodesc = (IODesc *)iodesc_; int i, j; float tmp; long fpixel[2]; FitsKw kw; int is_eq; int naxis; long dims[2]; int status = 0; if (iodesc->options == ReadOnly) { ioerr(NOPUT,iodesc,0); return -1; } /* check for a constant array, if not SCI data */ if (strcmp(iodesc->extname,"SCI") != 0 && da->tot_nx != 0 && da->tot_ny != 0) { tmp = PPix(da,0,0); for (i = 0, is_eq = 1; (i < da->tot_nx) && is_eq; ++i) { for (j = 0; (j < da->tot_ny); ++j) { if (PPix(da,i,j) != tmp) { is_eq = 0; break; } } } if (is_eq) { /* This is a constant array. */ /* add NPIX1, NPIX2 (if necessary), and PIXVALUE keywords */ kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw == 0) /* add it */ addFloatKw(iodesc->hdr,"PIXVALUE",tmp, "values of pixels in constant array"); else putFloatKw(kw,tmp); kw = findKw(iodesc->hdr,"NPIX1"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX1",iodesc->dims[0], "length of constant array axis 1"); else putIntKw(kw,iodesc->dims[0]); /* NPIX2 should only be added if the y-dimension is > 1. 
*/ if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX2",iodesc->dims[1], "length of constant array axis 2"); else putIntKw(kw,iodesc->dims[1]); } naxis = 0; fits_update_key(iodesc->ff, TINT, "NAXIS", &naxis, NULL, &status); iodesc->dims[0] = 0; iodesc->dims[1] = 0; if (fits_resize_img(iodesc->ff, FLOAT_IMG, 0, iodesc->dims, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } /* update the header, etc. */ if (iodesc->hflag) { iodesc->type = FLOAT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fits_flush_file(iodesc->ff, &status); return 0; } } /* If not a constant array, make sure NPIX1, NPIX2, and PIXVALUE * * are NOT present in the header to be written out. */ kw = findKw(iodesc->hdr,"NPIX1"); if (kw != 0) /* remove it */ delKw(kw); if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw != 0) /* remove it */ delKw(kw); } kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw != 0) /* remove it */ delKw(kw); /* Get the current CFITSIO size, and if it's different, resize it */ fits_get_img_size(iodesc->ff, 2, dims, &status); if (dims[0] != da->nx || dims[1] != da->ny) { iodesc->dims[0] = da->nx; iodesc->dims[1] = da->ny; if (fits_resize_img(iodesc->ff, FLOAT_IMG, 2, iodesc->dims, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } /* update the header area */ if (iodesc->hflag) { iodesc->type = FLOAT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fpixel[0] = 1; for (i = 0; i < da->ny; ++i) { fpixel[1] = i + 1; if (fits_write_pix(iodesc->ff, TFLOAT, fpixel, da->nx, (float *)&(PPix(da, 0, i)), &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } fits_flush_file(iodesc->ff, &status); clear_err(); return 0; } /* ** ** Write output a subsection of an image in memory to a file where the ** ** subsection is the full size of the output data. 
** ** */ int putFloatSect(IODescPtr iodesc_, FloatTwoDArray *da, int xbeg, int ybeg, int xsize, int ysize) { IODesc *iodesc = (IODesc *)iodesc_; int i, j, xend, yend; float tmp; FitsKw kw; long fpixel[2]; long dims[2]; int is_eq; int naxis; int status = 0; /* CFITSIO: Verify that the section is within range? */ if (iodesc->options == ReadOnly) { ioerr(NOPUT,iodesc, 0); return -1; } xend = xbeg + xsize; yend = ybeg + ysize; /* check for a constant array, if not SCI data */ if (strcmp(iodesc->extname,"SCI") != 0 && da->tot_nx != 0 && da->tot_ny != 0) { tmp = PPix(da, 0, 0); for (i = xbeg, is_eq = 1; (i < xend) && is_eq; ++i) { for (j = ybeg; (j < yend); ++j) { if (PPix(da,i,j) != tmp) { is_eq = 0; break; } } } if (is_eq) { /* This is a constant array. */ /* add NPIX1, NPIX2 (if necessary), and PIXVALUE keywords */ kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw == 0) /* add it */ addFloatKw(iodesc->hdr,"PIXVALUE",tmp, "values of pixels in constant array"); else putFloatKw(kw,tmp); kw = findKw(iodesc->hdr,"NPIX1"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX1",iodesc->dims[0], "length of constant array axis 1"); else putIntKw(kw,iodesc->dims[0]); /* NPIX2 should only be added if the y-dimension is > 1. */ if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX2",iodesc->dims[1], "length of constant array axis 2"); else putIntKw(kw,iodesc->dims[1]); } naxis = 0; fits_update_key(iodesc->ff, TINT, "NAXIS", &naxis, NULL, &status); iodesc->dims[0] = 0; iodesc->dims[1] = 0; /* update the header, etc. */ if (iodesc->hflag) { iodesc->type = FLOAT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fits_flush_file(iodesc->ff, &status); clear_err(); return 0; } } /* If not a constant array, make sure NPIX1, NPIX2, and PIXVALUE * * are NOT present in the header to be written out. 
*/ kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw != 0) /* remove it */ delKw(kw); kw = findKw(iodesc->hdr,"NPIX1"); if (kw != 0) /* remove it */ delKw(kw); if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw != 0) /* remove it */ delKw(kw); } /* Get the current CFITSIO size, and if it's different, resize it */ fits_get_img_size(iodesc->ff, 2, dims, &status); if (dims[0] != xend - xbeg || dims[1] != yend - ybeg) { iodesc->dims[0] = xend - xbeg; iodesc->dims[1] = yend - ybeg; if (fits_resize_img(iodesc->ff, FLOAT_IMG, 2, iodesc->dims, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } /* Note, we don't need to fill the image with the constant value, since the image will be entirely over-written by the passed in array da */ } /* update the header area */ if (iodesc->hflag) { iodesc->type = FLOAT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fpixel[0] = 1; for (i = ybeg; i < yend; ++i) { fpixel[1] = i - ybeg + 1; if (fits_write_pix(iodesc->ff, TFLOAT, fpixel, xsize, (float*)&(PPix(da, xbeg, i)), &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } fflush(stdout); clear_err(); return 0; } int getShortData(IODescPtr iodesc_, ShortTwoDArray *da) { IODesc *iodesc = (IODesc *)iodesc_; int no_dims, i, j; FitsKw kw; short val; long fpixel[2]; int anynul = 0; int status = 0; if (iodesc->options == WriteOnly) { ioerr(NOGET,iodesc, 0); return -1; } fits_get_img_dim(iodesc->ff, &no_dims, &status); fits_get_img_size(iodesc->ff, 2, iodesc->dims, &status); if (status) { ioerr(BADDIMS, iodesc, status); return -1; } /* If the number of dimensions of the image is zero, need to determine how many dimensions the image is supposed to have according to the NPIX[1/2] keyword(s). 
*/ if (no_dims == 0) { kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw == 0) { ioerr(BADSCIDIMS,iodesc,0); return -1; } val = getIntKw(kw); kw = findKw(iodesc->hdr,"NPIX1"); if (kw == 0) { ioerr(BADSCIDIMS,iodesc,0); return -1; } iodesc->dims[0] = getIntKw(kw); /* If NPIX2 is not found, the image should be 1D; dim2 = 1 and * * not 0 for purposes of memory allocation. */ kw = findKw(iodesc->hdr,"NPIX2"); if (kw == 0) { iodesc->dims[1] = 1; } else { iodesc->dims[1] = getIntKw(kw); } if (allocShortData(da, iodesc->dims[0], iodesc->dims[1], True)) return -1; for (j = 0; j < iodesc->dims[1]; ++j) for (i = 0; i < iodesc->dims[0]; ++i) PPix(da, i, j) = val; } else if (no_dims == 1) { iodesc->dims[1] = 1; /* CFITSIO TODO: Should we verify the type is correct here? Original code gets type, but then does nothing with it. */ if (allocShortData(da, iodesc->dims[0], iodesc->dims[1], True)) return -1; fpixel[0] = 1; fpixel[1] = 1; if (fits_read_pix(iodesc->ff, TSHORT, fpixel, iodesc->dims[0], NULL, (short *)&(PPix(da, 0, 0)), &anynul, &status)) { ioerr(BADREAD, iodesc, status); return -1; } } else if (no_dims == 2) { /* CFITSIO TODO: Should we verify the type is correct here? Original code gets type, but then does nothing with it. 
*/ if (allocShortData(da, iodesc->dims[0], iodesc->dims[1], True)) return -1; fpixel[0] = 1; for (i = 0; i < iodesc->dims[1]; ++i) { fpixel[1] = i + 1; if (fits_read_pix(iodesc->ff, TSHORT, fpixel, iodesc->dims[0], NULL, (short *)&(PPix(da, 0, i)), &anynul, &status)) { ioerr(BADREAD, iodesc, status); return -1; } } } else { ioerr(BADDIMS, iodesc, 0); return -1; } clear_err(); return 0; } int putShortData(IODescPtr iodesc_, ShortTwoDArray *da) { IODesc *iodesc = (IODesc *)iodesc_; int i, j; short tmp; long fpixel[2]; FitsKw kw; int is_eq; int naxis; long dims[2]; int status = 0; if (iodesc->options == ReadOnly) { ioerr(NOPUT,iodesc,0); return -1; } /* check for a constant array, if not SCI data */ if (strcmp(iodesc->extname,"SCI") != 0 && da->tot_nx != 0 && da->tot_ny != 0) { tmp = PPix(da,0,0); for (i = 0, is_eq = 1; (i < da->tot_nx) && is_eq; ++i) { for (j = 0; (j < da->tot_ny); ++j) { if (PPix(da,i,j) != tmp) { is_eq = 0; break; } } } if (is_eq) { /* This is a constant array. */ /* add NPIX1, NPIX2 (if necessary), and PIXVALUE keywords */ kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"PIXVALUE",(int)tmp, "values of pixels in constant array"); else putIntKw(kw,(int)tmp); kw = findKw(iodesc->hdr,"NPIX1"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX1",iodesc->dims[0], "length of constant array axis 1"); else putIntKw(kw,iodesc->dims[0]); /* NPIX2 should only be added if the y-dimension is > 1. */ if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw == 0) /* add it */ addIntKw(iodesc->hdr,"NPIX2",iodesc->dims[1], "length of constant array axis 2"); else putIntKw(kw,iodesc->dims[1]); } naxis = 0; fits_update_key(iodesc->ff, TINT, "NAXIS", &naxis, NULL, &status); iodesc->dims[0] = 0; iodesc->dims[1] = 0; if (fits_resize_img(iodesc->ff, SHORT_IMG, 0, iodesc->dims, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } /* update the header, etc. 
*/ if (iodesc->hflag) { iodesc->type = SHORT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fits_flush_file(iodesc->ff, &status); clear_err(); return 0; } } /* If not a constant array, make sure NPIX1, NPIX2, and PIXVALUE * * are NOT present in the header to be written out. */ kw = findKw(iodesc->hdr,"NPIX1"); if (kw != 0) /* remove it */ delKw(kw); if (da->tot_ny > 1) { kw = findKw(iodesc->hdr,"NPIX2"); if (kw != 0) /* remove it */ delKw(kw); } kw = findKw(iodesc->hdr,"PIXVALUE"); if (kw != 0) /* remove it */ delKw(kw); /* Get the current CFITSIO size, and if it's different, resize it */ fits_get_img_size(iodesc->ff, 2, dims, &status); if (dims[0] != da->nx || dims[1] != da->ny) { iodesc->dims[0] = da->nx; iodesc->dims[1] = da->ny; if (fits_resize_img(iodesc->ff, SHORT_IMG, 2, iodesc->dims, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } /* update the header area */ if (iodesc->hflag) { iodesc->type = SHORT_IMG; putHeader(iodesc); iodesc->hflag = 0; } fpixel[0] = 1; for (i = 0; i < da->ny; ++i) { fpixel[1] = i + 1; if (fits_write_pix(iodesc->ff, TSHORT, fpixel, da->nx, (short *)&(PPix(da, 0, i)), &status)) { ioerr(BADWRITE, iodesc, status); return -1; } } fits_flush_file(iodesc->ff, &status); clear_err(); return 0; } /* ** ** Write output a subsection of an image in memory to a file where the ** ** subsection is the full size of the output data. 
**
** */

/*
** putShortSect: write the (xbeg,ybeg)-(xbeg+xsize,ybeg+ysize) subsection
** of `da` as the FULL contents of the current HDU.  A non-SCI constant
** subsection is stored as a dimensionless HDU with PIXVALUE/NPIX*
** keywords (short/TSHORT analogue of putFloatSect).
** Returns 0 on success, -1 on error.
*/
int putShortSect(IODescPtr iodesc_, ShortTwoDArray *da, int xbeg, int ybeg,
        int xsize, int ysize) {
    IODesc *iodesc = (IODesc *)iodesc_;
    int i, j, xend, yend;
    short tmp;
    FitsKw kw;
    int naxis;
    int is_eq;
    long fpixel[2];
    long dims[2];
    int status = 0;

    if (iodesc->options == ReadOnly) {
        ioerr(NOPUT,iodesc,0);
        return -1;
    }
    xend = xbeg + xsize;
    yend = ybeg + ysize;
    /* check for a constant array, if not SCI data */
    if (strcmp(iodesc->extname,"SCI") != 0
            && da->tot_nx != 0 && da->tot_ny != 0) {
        /* scan the subsection for a single repeated value */
        tmp = PPix(da,0,0);
        for (i = xbeg, is_eq = 1; (i < xend) && is_eq; ++i) {
            for (j = ybeg; (j < yend); ++j) {
                if (PPix(da,i,j) != tmp) {
                    is_eq = 0;
                    break;
                }
            }
        }
        if (is_eq) {
            /* This is a constant array. */
            /* add NPIX1, NPIX2 (if necessary), and PIXVALUE keywords */
            kw = findKw(iodesc->hdr,"PIXVALUE");
            if (kw == 0) /* add it */
                addIntKw(iodesc->hdr,"PIXVALUE",(int)tmp,
                    "values of pixels in constant array");
            else
                putIntKw(kw,(int)tmp);
            kw = findKw(iodesc->hdr,"NPIX1");
            if (kw == 0) /* add it */
                addIntKw(iodesc->hdr,"NPIX1",iodesc->dims[0],
                    "length of constant array axis 1");
            else
                putIntKw(kw,iodesc->dims[0]);
            /* NPIX2 should only be added if the y-dimension is > 1. */
            if (da->tot_ny > 1) {
                kw = findKw(iodesc->hdr,"NPIX2");
                if (kw == 0) /* add it */
                    addIntKw(iodesc->hdr,"NPIX2",iodesc->dims[1],
                        "length of constant array axis 2");
                else
                    putIntKw(kw,iodesc->dims[1]);
            }
            /* collapse the HDU to zero dimensions.  NOTE(review): unlike
               putShortData, no fits_resize_img is issued here — confirm. */
            naxis = 0;
            fits_update_key(iodesc->ff, TINT, "NAXIS", &naxis, NULL,
                            &status);
            iodesc->dims[0] = 0;
            iodesc->dims[1] = 0;
            /* update the header, etc. */
            if (iodesc->hflag) {
                iodesc->type = SHORT_IMG;
                putHeader(iodesc);
                iodesc->hflag = 0;
            }
            fits_flush_file(iodesc->ff, &status);
            clear_err();
            return 0;
        }
    }
    /* If not a constant array, make sure NPIX1, NPIX2, and PIXVALUE *
     * are NOT present in the header to be written out. */
    kw = findKw(iodesc->hdr,"PIXVALUE");
    if (kw != 0) /* remove it */
        delKw(kw);
    kw = findKw(iodesc->hdr,"NPIX1");
    if (kw != 0) /* remove it */
        delKw(kw);
    if (da->tot_ny > 1) {
        kw = findKw(iodesc->hdr,"NPIX2");
        if (kw != 0) /* remove it */
            delKw(kw);
    }

    /* Get the current CFITSIO size, and if it's different, resize it */
    fits_get_img_size(iodesc->ff, 2, dims, &status);
    if (dims[0] != xend - xbeg || dims[1] != yend - ybeg) {
        iodesc->dims[0] = xend - xbeg;
        iodesc->dims[1] = yend - ybeg;
        if (fits_resize_img(iodesc->ff, SHORT_IMG, 2, iodesc->dims,
                            &status)) {
            ioerr(BADWRITE, iodesc, status);
            return -1;
        }
        /* Note, we don't need to fill the image with the constant value,
           since the image will be entirely over-written by the passed in
           array da */
    }
    /* update the header area */
    if (iodesc->hflag) {
        iodesc->type = SHORT_IMG;
        putHeader(iodesc);
        iodesc->hflag = 0;
    }
    /* write the section row by row */
    fpixel[0] = 1;
    for (i = ybeg; i < yend; ++i) {
        fpixel[1] = i - ybeg + 1;
        if (fits_write_pix(iodesc->ff, TSHORT, fpixel, xsize,
                           (short *)&(PPix(da, xbeg, i)), &status)) {
            ioerr(BADWRITE,iodesc, status);
            return -1;
        }
    }
    fits_flush_file(iodesc->ff, &status);
    clear_err();
    return 0;
}

/*
** getFloatLine: read one image row (0-based `line`) into `ptr` as floats.
** For a dimensionless (constant) HDU the row is synthesized from
** PIXVALUE/NPIX1.  Caller supplies a buffer of at least NAXIS1 floats.
** Returns 0 on success, -1 on error.
*/
int getFloatLine(IODescPtr iodesc_, int line, float *ptr) {
    IODesc *iodesc = (IODesc *)iodesc_;
    int no_dims, i, dim1;
    long dims[2];
    FitsKw kw;
    float val;
    long fpixel[2];
    int anynul;
    int status = 0;

    if (iodesc->options == WriteOnly) {
        ioerr(NOGET,iodesc,0);
        return -1;
    }
    if (fits_get_img_dim(iodesc->ff, &no_dims, &status)) {
        ioerr(BADDIMS, iodesc, status);
        return -1;
    }
    if (no_dims == 0) {
        /* constant HDU: replicate PIXVALUE across NPIX1 pixels */
        kw = findKw(iodesc->hdr,"NPIX1");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        }
        dim1 = getIntKw(kw);
        kw = findKw(iodesc->hdr,"PIXVALUE");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        }
        val = getFloatKw(kw);
        for (i = 0; i < dim1; ++i) {
            ptr[i] = val;
        }
    } else {
        if (fits_get_img_size(iodesc->ff, 2, dims, &status)) {
            ioerr(BADDIMS, iodesc, status);
            return -1;
        }
        /* CFITSIO pixel coordinates are 1-based */
        fpixel[0] = 1;
        fpixel[1] = line + 1;
        if (fits_read_pix(iodesc->ff, TFLOAT, fpixel, dims[0], NULL, ptr,
                          &anynul, &status)) {
            ioerr(BADREAD, iodesc, status);
            return -1;
        }
    }
    clear_err();
    return 0;
}

/*
** putFloatLine: write one image row (0-based `line`) from `ptr`.  If the
** HDU is currently a dimensionless constant image, it is first expanded
** to a real NPIX1 x NPIX2 image filled with PIXVALUE (those keywords are
** removed from the header).  Returns 0 on success, -1 on error.
*/
int putFloatLine(IODescPtr iodesc_, int line, float *ptr) {
    IODesc *iodesc = (IODesc *)iodesc_;
    long fpixel[2];
    int no_dims;
    long dims[2];
    FitsKw kw;
    float* buffer;
    float val;
    long i, j;
    int status = 0;

    if (iodesc->options == ReadOnly) {
        ioerr(NOPUT,iodesc,0);
        return -1;
    }
    /* NOTE(review): hflag is cleared BEFORE putHeader here, so the
       NAXIS/BITPIX sync branch inside putHeader is skipped — this differs
       from the putHeader-then-clear order used elsewhere; confirm intent. */
    if (iodesc->hflag) {
        iodesc->hflag = 0;
        putHeader(iodesc);
    }
    /* If a constant array, convert to a non-constant array */
    fits_get_img_dim(iodesc->ff, &no_dims, &status);
    if (no_dims == 0) {
        kw = findKw(iodesc->hdr,"NPIX1");
        if (kw == 0) {
            ioerr(BADSCIDIMS, iodesc, 0);
            return -1;
        } else {
            dims[0] = getIntKw(kw);
            delKw(kw);
        }
        kw = findKw(iodesc->hdr,"NPIX2");
        if (kw == 0) {
            ioerr(BADSCIDIMS, iodesc, 0);
            return -1;
        } else {
            dims[1] = getIntKw(kw);
            delKw(kw);
        }
        kw = findKw(iodesc->hdr,"PIXVALUE");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        } else {
            val = getFloatKw(kw);
            delKw(kw);
        }
        iodesc->dims[0] = dims[0];
        iodesc->dims[1] = dims[1];
        if (fits_resize_img(iodesc->ff, FLOAT_IMG, 2, dims, &status)) {
            ioerr(BADWRITE, iodesc, status);
            return -1;
        }
        /* NOTE(review): status is still 0 here, so this reports a zero
           status code on allocation failure. */
        buffer = malloc(dims[0] * sizeof(float));
        if (buffer == NULL) {
            ioerr(BADWRITE, iodesc, status);
            return -1;
        }
        for (i = 0; i < dims[0]; ++i) {
            buffer[i] = val;
        }
        /* Write the constant value into CFITSIO's array */
        fpixel[0] = 1;
        for (j = 0; j < dims[1]; ++j) {
            fpixel[1] = j + 1;
            if (fits_write_pix(iodesc->ff, TFLOAT, fpixel, dims[0], buffer,
                               &status)) {
                ioerr(BADWRITE, iodesc, status);
                free(buffer);
                return -1;
            }
        }
        free(buffer);
    }
    /* CFITSIO pixel coordinates are 1-based */
    fpixel[0] = 1;
    fpixel[1] = line + 1;
    if (fits_write_pix(iodesc->ff, TFLOAT, fpixel, iodesc->dims[0], ptr,
                       &status)) {
        ioerr(BADWRITE, iodesc, status);
        return -1;
    }
    clear_err();
    return 0;
}

/*
** getShortLine: read one image row (0-based `line`) into `ptr` as shorts.
** For a dimensionless (constant) HDU the row is synthesized from
** PIXVALUE/NPIX1.  Returns 0 on success, -1 on error.
*/
int getShortLine(IODescPtr iodesc_, int line, short *ptr) {
    IODesc *iodesc = (IODesc *)iodesc_;
    int no_dims, dim1, i;
    long dims[2];
    FitsKw kw;
    short val;
    long fpixel[2];
    int anynul;
    int status = 0;

    if (iodesc->options == WriteOnly) {
        ioerr(NOGET,iodesc,0);
        return -1;
    }
    if (fits_get_img_dim(iodesc->ff, &no_dims, &status)) {
        ioerr(BADDIMS, iodesc, status);
        return -1;
    }
    if (no_dims == 0) {
        /* constant HDU: replicate PIXVALUE across NPIX1 pixels */
        kw = findKw(iodesc->hdr,"NPIX1");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        }
        dim1 = getIntKw(kw);
        kw = findKw(iodesc->hdr,"PIXVALUE");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        }
        val = getIntKw(kw);
        for (i = 0; i < dim1; ++i)
            ptr[i] = val;
    } else {
        if (fits_get_img_size(iodesc->ff, 2, dims, &status)) {
            ioerr(BADDIMS, iodesc, status);
            return -1;
        }
        /* CFITSIO pixel coordinates are 1-based */
        fpixel[0] = 1;
        fpixel[1] = line + 1;
        if (fits_read_pix(iodesc->ff, TSHORT, fpixel, dims[0], NULL, ptr,
                          &anynul, &status)) {
            ioerr(BADREAD, iodesc, status);
            return -1;
        }
    }
    clear_err();
    return 0;
}

/*
** putShortLine: write one image row (0-based `line`) from `ptr` (short
** analogue of putFloatLine).  NOTE: this function continues beyond the
** end of this chunk; only the visible portion appears below.
*/
int putShortLine(IODescPtr iodesc_, int line, short *ptr) {
    IODesc *iodesc = (IODesc *)iodesc_;
    long fpixel[2];
    long dims[2];
    int no_dims;
    FitsKw kw;
    short val;
    short* buffer;
    long i, j;
    int status = 0;

    if (iodesc->options == ReadOnly) {
        ioerr(NOPUT,iodesc,0);
        return -1;
    }
    /* NOTE(review): hflag cleared before putHeader — see putFloatLine. */
    if (iodesc->hflag) {
        iodesc->hflag = 0;
        putHeader(iodesc);
    }
    /* If a constant array, convert to a non-constant array */
    fits_get_img_dim(iodesc->ff, &no_dims, &status);
    if (no_dims == 0) {
        kw = findKw(iodesc->hdr,"NPIX1");
        if (kw == 0) {
            ioerr(BADSCIDIMS, iodesc, 0);
            return -1;
        } else {
            dims[0] = getIntKw(kw);
            delKw(kw);
        }
        kw = findKw(iodesc->hdr,"NPIX2");
        if (kw == 0) {
            ioerr(BADSCIDIMS, iodesc, 0);
            return -1;
        } else {
            dims[1] = getIntKw(kw);
            delKw(kw);
        }
        kw = findKw(iodesc->hdr,"PIXVALUE");
        if (kw == 0) {
            ioerr(BADSCIDIMS,iodesc,0);
            return -1;
        } else {
            val = getIntKw(kw);
            delKw(kw);
        }
        iodesc->dims[0] = dims[0];
        iodesc->dims[1] = dims[1];
        if (fits_resize_img(iodesc->ff, SHORT_IMG, 2, dims, &status)) {
            ioerr(BADWRITE, iodesc, status);
            return -1;
        }
        buffer = malloc(dims[0] * sizeof(short));
        if (buffer == NULL) {
            ioerr(BADWRITE, iodesc, status);
            return -1;
        }
        for (i = 0; i < dims[0]; ++i) {
            buffer[i] = val;
        }
        /* Write the constant value into CFITSIO's array */
        fpixel[0] = 1;
for (j = 0; j < dims[1]; ++j) { fpixel[1] = j + 1; if (fits_write_pix(iodesc->ff, TSHORT, fpixel, dims[0], buffer, &status)) { ioerr(BADWRITE, iodesc, status); free(buffer); return -1; } } free(buffer); } fpixel[0] = 1; fpixel[1] = line + 1; if (fits_write_pix(iodesc->ff, TSHORT, fpixel, iodesc->dims[0], ptr, &status)) { ioerr(BADWRITE, iodesc, status); return -1; } clear_err(); return 0; }
a.33.1.c
/* { dg-do compile } */
/* { dg-require-effective-target tls } */

/* Compile-only testcase for the OpenMP `single copyprivate` clause combined
 * with `threadprivate` variables (requires thread-local storage support,
 * hence the tls target requirement above).  No execution is performed. */

#include <stdio.h>

/* Globals made per-thread: each OpenMP thread owns its own copy of x and y. */
float x, y;
#pragma omp threadprivate(x, y)

/* One thread of the team executes the `single` region and reads four values;
 * copyprivate then broadcasts its a, b, x, and y to the corresponding
 * variables of every other thread in the team. */
void init (float a, float b)
{
#pragma omp single copyprivate(a,b,x,y)
  {
    scanf ("%f %f %f %f", &a, &b, &x, &y);
  }
}
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. /// \param AllowExtractorSinking Flag to include sinking instructions, /// emitted by CodeExtractor, in the /// outlined region. Default is false. void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false); /// Add attributes known for \p FnID to \p Fn. 
void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). /// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
using PrivatizeCallbackTy = function_ref<InsertPointTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)>; /// Description of a LLVM-IR insertion point (IP) and a debug/source location /// (filename, line, column, ...). struct LocationDescription { template <typename T, typename U> LocationDescription(const IRBuilder<T, U> &IRB) : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {} LocationDescription(const InsertPointTy &IP) : IP(IP) {} LocationDescription(const InsertPointTy &IP, const DebugLoc &DL) : IP(IP), DL(DL) {} InsertPointTy IP; DebugLoc DL; }; /// Emitter methods for OpenMP directives. /// ///{ /// Generator for '#omp barrier' /// /// \param Loc The location where the barrier directive was encountered. /// \param DK The kind of directive that caused the barrier. /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall = false, bool CheckCancelFlag = true); /// Generator for '#omp cancel' /// /// \param Loc The location where the directive was encountered. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param CanceledDirective The kind of directive that is cancled. /// /// \returns The insertion point after the barrier. InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective); /// Generator for '#omp parallel' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param BodyGenCB Callback that will generate the region code. /// \param PrivCB Callback to copy a given variable (think copy constructor). 
/// \param FiniCB Callback to finalize variable copies. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread. 
Returning /// CanonicalLoopInfo objects representing them may eventually be useful for /// the apply clause planned in OpenMP 6.0, but currently whether these are /// canonical loops is irrelevant. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a dynamically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain, and then in each iteration /// to update the loop counter. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param SchedType Type of scheduling to be passed to the init function. /// \param NeedsBarrier Indicates whether a barrier must be insterted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. 
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, omp::OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be insterted after /// the loop. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier); /// Tile a loop nest. /// /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in /// \p/ Loops must be perfectly nested, from outermost to innermost loop /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value /// of every loop and every tile sizes must be usable in the outermost /// loop's preheader. This implies that the loop nest is rectangular. 
/// /// Example: /// \code /// for (int i = 0; i < 15; ++i) // Canonical loop "i" /// for (int j = 0; j < 14; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to /// \code /// for (int i1 = 0; i1 < 3; ++i1) /// for (int j1 = 0; j1 < 2; ++j1) /// for (int i2 = 0; i2 < 5; ++i2) /// for (int j2 = 0; j2 < 7; ++j2) /// body(i1*3+i2, j1*3+j2); /// \endcode /// /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are /// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also /// handles non-constant trip counts, non-constant tile sizes and trip counts /// that are not multiples of the tile size. In the latter case the tile loop /// of the last floor-loop iteration will have fewer iterations than specified /// as its tile size. /// /// /// @param DL Debug location for instructions added by tiling, for /// instance the floor- and tile trip count computation. /// @param Loops Loops to tile. The CanonicalLoopInfo objects are /// invalidated by this method, i.e. should not used after /// tiling. /// @param TileSizes For each loop in \p Loops, the tile size for that /// dimensions. /// /// \returns A list of generated loops. Contains twice as many loops as the /// input loop nest; the first half are the floor loops and the /// second half are the tile loops. std::vector<CanonicalLoopInfo *> tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, ArrayRef<Value *> TileSizes); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. void createTaskwait(const LocationDescription &Loc); /// Generator for '#omp taskyield' /// /// \param Loc The location where the taskyield directive was encountered. 
void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction. They are /// expected to atomically update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {} /// Returns the type of the element being reduced. Type *getElementType() const { return Variable->getType()->getPointerElementType(); } /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. 
/// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... /// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param Variables A list of variables in which the reduction /// results will be stored (values of pointer type). /// \param PrivateVariables A list of variables in which the partial /// reduction results are stored (values of pointer /// type). 
///                      Coindexed with Variables. Privatization
///                      must be handled separately from this call.
/// \param ReductionGen  A list of generators for non-atomic reduction
///                      bodies. Each takes a pair of partially reduced
///                      values and sets a new one.
/// \param AtomicReductionGen A list of generators for atomic reduction
///                      bodies, empty if the reduction cannot be
///                      performed with atomics. Each takes a pair of
///                      _pointers_ to partially reduced values and
///                      atomically stores the result into the first.
/// \param IsNoWait      A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
                               InsertPointTy AllocaIP,
                               ArrayRef<ReductionInfo> ReductionInfos,
                               bool IsNoWait = false);

///}

/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }

/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
  Builder.restoreIP(Loc.IP);
  Builder.SetCurrentDebugLocation(Loc.DL);
  return Loc.IP.getBlock() != nullptr;
}

/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
                                          omp::RuntimeFunction FnID);

/// As above, but return the runtime function as a \c Function*.
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);

/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);

/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();

/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
                               unsigned Line, unsigned Column);

/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);

/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);

/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
                        omp::IdentFlag Flags = omp::IdentFlag(0),
                        unsigned Reserve2Flags = 0);

/// Get the type corresponding to __kmpc_impl_lanemask_t from the deviceRTL
Type *getLanemaskType();

/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
                              omp::Directive CanceledDirective,
                              FinalizeCallbackTy ExitCB = {});

/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
///                        should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
                              omp::Directive DK, bool ForceSimpleCall,
                              bool CheckCancelFlag);

/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);

/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;

/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  return !FinalizationStack.empty() &&
         FinalizationStack.back().IsCancellable &&
         FinalizationStack.back().DK == DK;
}

/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);

/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);

/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);

/// The underlying LLVM-IR module
Module &M;

/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;

/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;

/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;

/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
  using PostOutlineCBTy = std::function<void(Function &)>;
  PostOutlineCBTy PostOutlineCB;
  BasicBlock *EntryBB, *ExitBB;

  /// Collect all blocks in between EntryBB and ExitBB in both the given
  /// vector and set.
  void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                     SmallVectorImpl<BasicBlock *> &BlockVector);

  /// Return the function that contains the region to be outlined.
  Function *getFunction() const { return EntryBB->getParent(); }
};

/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;

/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;

/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }

/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;

/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
                                      std::string VarName);

/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
                      std::string VarName);

/// Alloca slots prepared for a call to an offload mapper function.
struct MapperAllocas {
  AllocaInst *ArgsBase = nullptr;
  AllocaInst *Args = nullptr;
  AllocaInst *ArgSizes = nullptr;
};

/// Create the alloca instructions used in a call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
                         InsertPointTy AllocaIP, unsigned NumOperands,
                         struct MapperAllocas &MapperAllocas);

/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
                    Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
                    struct MapperAllocas &MapperAllocas, int64_t DeviceID,
                    unsigned NumOperands);

public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
                                llvm::Value *BufSize, llvm::Value *CpyBuf,
                                llvm::Value *CpyFn, llvm::Value *DidIt);

/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
                           BodyGenCallbackTy BodyGenCB,
                           FinalizeCallbackTy FiniCB, llvm::Value *DidIt);

/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
                           BodyGenCallbackTy BodyGenCB,
                           FinalizeCallbackTy FiniCB);

/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
                           BodyGenCallbackTy BodyGenCB,
                           FinalizeCallbackTy FiniCB, Value *Filter);

/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB,
                             StringRef CriticalName, Value *HintInst);

/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
///                 before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
                             InsertPointTy AllocaIP,
                             ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
                             PrivatizeCallbackTy PrivCB,
                             FinalizeCallbackTy FiniCB, bool IsCancellable,
                             bool IsNowait);

/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
                            BodyGenCallbackTy BodyGenCB,
                            FinalizeCallbackTy FiniCB);

/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
///                    and copy.in.end block
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
                                       Value *PrivateAddr,
                                       llvm::IntegerType *IntPtrTy,
                                       bool BranchtoEnd = true);

/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
                         Value *Allocator, std::string Name = "");

/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
                        Value *Allocator, std::string Name = "");

/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
                                    llvm::Value *Pointer,
                                    llvm::ConstantInt *Size,
                                    const llvm::Twine &Name = Twine(""));

/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{

/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
                               bool RequiresFullRuntime);

/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
                        bool RequiresFullRuntime);

///}

/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                             \
  ArrayType *VarName##Ty = nullptr;                                            \
  PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                  \
  FunctionType *VarName = nullptr;                                             \
  PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...)                                 \
  StructType *VarName = nullptr;                                               \
  PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

///}

private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);

/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
///                    to evaluate a conditional of whether a thread will
///                    execute body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
                                       BasicBlock *ExitBB,
                                       bool Conditional = false);

/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
///                    and has a finalization callback in the stack that
///                    should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
                                      InsertPointTy FinIP,
                                      Instruction *ExitCall,
                                      bool HasFinalize = true);

/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
///                    to evaluate a conditional of whether a thread will
///                    execute body code or not.
/// \param HasFinalize indicate if the directive will require finalization
///                    and has a finalization callback in the stack that
///                    should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
///                      directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD,
                                   Instruction *EntryCall,
                                   Instruction *ExitCall,
                                   BodyGenCallbackTy BodyGenCB,
                                   FinalizeCallbackTy FiniCB,
                                   bool Conditional = false,
                                   bool HasFinalize = true,
                                   bool IsCancellable = false);

/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. 
/// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of /// the update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXLHSInRHSPart); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. 
/// \param X    The target pointer to be atomically read
/// \param V    Memory address where to store atomically read
///             value
/// \param AO   Atomic ordering of the generated atomic
///             instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
                               AtomicOpValue &X, AtomicOpValue &V,
                               AtomicOrdering AO);

/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc  The insert and source location description.
/// \param X    The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO   Atomic ordering of the generated atomic
///             instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
                                AtomicOpValue &X, Value *Expr,
                                AtomicOrdering AO);

/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc      The insert and source location description.
/// \param AllocIP  Instruction to create AllocaInst before.
/// \param X        The target atomic pointer to be updated
/// \param Expr     The value to update X with.
/// \param AO       Atomic ordering of the generated atomic instructions.
/// \param RMWOp    The binary operation used for update. If operation
///                 is not supported by atomicRMW, or belong to
///                 {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
///                 atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
///                 expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
///                 the update expression, false otherwise.
///                 (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
                                 Instruction *AllocIP, AtomicOpValue &X,
                                 Value *Expr, AtomicOrdering AO,
                                 AtomicRMWInst::BinOp RMWOp,
                                 AtomicUpdateCallbackTy &UpdateOp,
                                 bool IsXLHSInRHSPart);

/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc        The insert and source location description.
/// \param AllocIP    Instruction to create AllocaInst before.
/// \param X          The target atomic pointer to be updated
/// \param V          Memory address where to store captured value
/// \param Expr       The value to update X with.
/// \param AO         Atomic ordering of the generated atomic instructions
/// \param RMWOp      The binary operation used for update. If
///                   operation is not supported by atomicRMW, or belong to
///                   {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
///                   atomic will be generated.
/// \param UpdateOp   Code generator for complex expressions that cannot be
///                   expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
///                   X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
///                   update expression, false otherwise.
///                   (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
///                   'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy createAtomicCapture(const LocationDescription &Loc,
                                  Instruction *AllocIP, AtomicOpValue &X,
                                  AtomicOpValue &V, Value *Expr,
                                  AtomicOrdering AO,
                                  AtomicRMWInst::BinOp RMWOp,
                                  AtomicUpdateCallbackTy &UpdateOp,
                                  bool UpdateExpr, bool IsPostfixUpdate,
                                  bool IsXLHSInRHSPart);

/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e.
/// no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL        DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F         Function in which to insert the BasicBlocks.
/// \param PreInsertBefore  Where to insert BBs that execute before the body,
///                         typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name      Base name used to derive BB
///                  and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
                                      Function *F,
                                      BasicBlock *PreInsertBefore,
                                      BasicBlock *PostInsertBefore,
                                      const Twine &Name = {});
};

/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
///   loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
///   increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
///   itself allows side-effects, but the order in which they happen, including
///   how often or whether at all, is unspecified. We expect that the frontend
///   will emit those side-effect instructions somewhere (e.g. before the loop)
///   such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///        |
///  /-> Header
///  |      |
///  |    Cond---\
///  |      |    |
///  |     Body  |
///  |    | | |  |
///  |   <...>   |
///  |    | | |  |
///   \--Latch   |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Preheader = nullptr;
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Body = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;
  BasicBlock *After = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Preheader;
  }

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Body;
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return After;
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The trip count is the second operand of the compare in the Cond block.
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The induction variable is the PHI at the top of the Header block.
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {After, After->begin()};
  };

  /// Return the function this canonical loop is located in.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};

} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
GB_unop__identity_fp64_int32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp64_int32
// op(A') function:  GB_unop_tran__identity_fp64_int32

// C type:   double
// A type:   int32_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: int32_t -> double requires a cast even though op is identity)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the typecast int32_t -> double entrywise.  The dense/full case
// iterates over all anz entries; the bitmap case skips entries whose Ab bit
// is clear.
GrB_Info GB_unop_apply__identity_fp64_int32
(
    double *Cx,             // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is shared across all generated unary ops and is
// pulled in from GB_unop_transpose.c, driven by the macros defined above.
GrB_Info GB_unop_tran__identity_fp64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ccl_utils.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include <gsl/gsl_errno.h>

#include "ccl.h"

/* ------- ROUTINE: ccl_linear spacing ------
INPUTS: [xmin,xmax] of the interval to be divided in N bins
OUTPUT: bin edges in range [xmin,xmax]
Returns a malloc'd array of N linearly spaced values, or NULL on allocation
failure (a CCL warning is raised). Caller owns and must free the result.
NOTE(review): unlike ccl_log_spacing there is no N<2 guard here, so N==1
divides by zero in (N - 1.) — confirm callers guarantee N >= 2. */
double * ccl_linear_spacing(double xmin, double xmax, int N)
{
  double dx = (xmax-xmin)/(N -1.);

  double * x = malloc(sizeof(double)*N);
  if (x==NULL) {
    ccl_raise_warning(
      CCL_ERROR_MEMORY,
      "ERROR: Could not allocate memory for linear-spaced array (N=%d)\n", N);
    return x;
  }

  for (int i=0; i<N; i++) {
    x[i] = xmin + dx*i;
  }
  x[0]=xmin; //Make sure roundoff errors don't spoil edges
  x[N-1]=xmax; //Make sure roundoff errors don't spoil edges

  return x;
}

/* ------- ROUTINE: ccl_linlog spacing ------
 * INPUTS: [xminlog,xmax] of the interval to be divided in bins
 * xmin when linear spacing starts
 * Nlog number of logarithmically spaced bins
 * Nlin number of linearly spaced bins
 * OUTPUT: bin edges in range [xminlog,xmax]
 * The array has Nlog+Nlin-1 entries because the switchover point xmin is
 * shared by the log and linear segments. Returns NULL (with a CCL warning)
 * on invalid inputs or allocation failure; caller frees the result.
 * */
double * ccl_linlog_spacing(double xminlog, double xmin, double xmax, int Nlog, int Nlin)
{
  // Validate inputs: need at least 2 log points, strictly positive log
  // endpoints, and xminlog <= xmin <= xmax.
  if (Nlog<2) {
    ccl_raise_warning(
      CCL_ERROR_LINLOGSPACE,
      "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", Nlog);
    return NULL;
  }

  if (!(xminlog>0 && xmin>0)) {
    ccl_raise_warning(
      CCL_ERROR_LINLOGSPACE,
      "ERROR: Cannot make log-spaced array xminlog or xmin non-positive (had %le, %le)\n",
      xminlog, xmin);
    return NULL;
  }

  if (xminlog>xmin){
    ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xminlog must be smaller as xmin");
    return NULL;
  }

  if (xmin>xmax){
    ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xmin must be smaller as xmax");
    return NULL;
  }

  double * x = malloc(sizeof(double)*(Nlin+Nlog-1));
  if (x==NULL) {
    ccl_raise_warning(
      CCL_ERROR_MEMORY,
      "ERROR: Could not allocate memory for array of size (Nlin+Nlog-1)=%d)\n", (Nlin+Nlog-1));
    return x;
  }

  // Log segment covers [xminlog, xmin]; linear segment covers [xmin, xmax].
  // NOTE(review): no Nlin<2 guard — Nlin==1 makes dx division by zero;
  // confirm callers guarantee Nlin >= 2.
  double dx = (xmax-xmin)/(Nlin -1.);
  double log_xchange = log(xmin);
  double log_xmin = log(xminlog);
  double dlog_x = (log_xchange - log_xmin) / (Nlog-1.);

  for (int i=0; i<Nlin+Nlog-1; i++) {
    if (i<Nlog)
      x[i] = exp(log_xmin + dlog_x*i);
    if (i>=Nlog)
      x[i] = xmin + dx*(i-Nlog+1);
  }

  x[0]=xminlog; //Make sure roundoff errors don't spoil edges
  x[Nlog-1]=xmin; //Make sure roundoff errors don't spoil edges
  x[Nlin+Nlog-2]=xmax; //Make sure roundoff errors don't spoil edges

  return x;
}

/* ------- ROUTINE: ccl_log spacing ------
INPUTS: [xmin,xmax] of the interval to be divided logarithmically in N bins
TASK: divide an interval in N logarithmic bins
OUTPUT: bin edges in range [xmin,xmax]
Returns NULL (with a CCL warning) on invalid inputs or allocation failure;
caller frees the result. */
double * ccl_log_spacing(double xmin, double xmax, int N)
{
  if (N<2) {
    ccl_raise_warning(
      CCL_ERROR_LOGSPACE,
      "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", N);
    return NULL;
  }

  if (!(xmin>0 && xmax>0)) {
    ccl_raise_warning(
      CCL_ERROR_LOGSPACE,
      "ERROR: Cannot make log-spaced array xmax or xmax non-positive (had %le, %le)\n",
      xmin, xmax);
    return NULL;
  }

  double log_xmax = log(xmax);
  double log_xmin = log(xmin);
  double dlog_x = (log_xmax - log_xmin) / (N-1.);

  double * x = malloc(sizeof(double)*N);
  if (x==NULL) {
    ccl_raise_warning(
      CCL_ERROR_MEMORY,
      "ERROR: Could not allocate memory for log-spaced array (N=%d)\n", N);
    return x;
  }

  // Build by repeated multiplication with the constant ratio exp(dlog_x)
  // instead of calling exp() per element.
  double xratio = exp(dlog_x);
  x[0] = xmin; //Make sure roundoff errors don't spoil edges
  for (int i=1; i<N-1; i++) {
    x[i] = x[i-1] * xratio;
  }
  x[N-1]=xmax; //Make sure roundoff errors don't spoil edges

  return x;
}

#define CCL_GAMMA1 2.6789385347077476336556 //Gamma(1/3)
#define CCL_GAMMA2 1.3541179394264004169452 //Gamma(2/3)
#define CCL_ROOTPI12 21.269446210866192327578 //12*sqrt(pi)

/* Spherical Bessel function j_l(x).
 * Small l (< 7): explicit closed forms, with Taylor series near x = 0 to
 * avoid cancellation. Large l: uniform asymptotic expansions split by
 * regime (small argument, large argument, below/above/near the turning
 * point ax ~ nu = l + 1/2). The final sign flip applies j_l(-x) =
 * (-1)^l j_l(x) for odd l.
 * Exits the process (exit(1)) if called with l < 0. */
double ccl_j_bessel(int l,double x)
{
  double jl;
  double ax=fabs(x);
  double ax2=x*x;

  if(l<0) {
    fprintf(stderr,"CosmoMas: l>0 for Bessel function");
    exit(1);
  }

  if(l<7) {
    // Closed-form expressions; series expansions guard the small-|x| region.
    if(l==0) {
      if(ax<0.1) jl=1-ax2*(1-ax2/20.)/6.;
      else jl=sin(x)/x;
    }
    else if(l==1) {
      if(ax<0.2) jl=ax*(1-ax2*(1-ax2/28)/10)/3;
      else jl=(sin(x)/ax-cos(x))/ax;
    }
    else if(l==2) {
      if(ax<0.3) jl=ax2*(1-ax2*(1-ax2/36)/14)/15;
      else jl=(-3*cos(x)/ax-sin(x)*(1-3/ax2))/ax;
    }
    else if(l==3) {
      if(ax<0.4) jl=ax*ax2*(1-ax2*(1-ax2/44)/18)/105;
      else jl=(cos(x)*(1-15/ax2)-sin(x)*(6-15/ax2)/ax)/ax;
    }
    else if(l==4) {
      if(ax<0.6) jl=ax2*ax2*(1-ax2*(1-ax2/52)/22)/945;
      else jl=(sin(x)*(1-(45-105/ax2)/ax2)+cos(x)*(10-105/ax2)/ax)/ax;
    }
    else if(l==5) {
      if(ax<1.0) jl=ax2*ax2*ax*(1-ax2*(1-ax2/60)/26)/10395;
      else {
        jl=(sin(x)*(15-(420-945/ax2)/ax2)/ax-
            cos(x)*(1-(105-945/ax2)/ax2))/ax;
      }
    }
    else {
      if(ax<1.0) jl=ax2*ax2*ax2*(1-ax2*(1-ax2/68)/30)/135135;
      else {
        jl=(sin(x)*(-1+(210-(4725-10395/ax2)/ax2)/ax2)+
            cos(x)*(-21+(1260-10395/ax2)/ax2)/ax)/ax;
      }
    }
  }
  else {
    double nu=l+0.5;
    double nu2=nu*nu;

    if(ax<1.0E-40) jl=0;
    else if((ax2/l)<0.5) {
      // Small-argument asymptotic series.
      jl=(exp(l*log(ax/nu)-M_LN2+nu*(1-M_LN2)-(1-(1-3.5/nu2)/(30*nu2))/(12*nu))/nu)*
        (1-ax2/(4*nu+4)*(1-ax2/(8*nu+16)*(1-ax2/(12*nu+36))));
    }
    else if((l*l/ax)<0.5) {
      // Large-argument oscillatory expansion.
      double beta=ax-0.5*M_PI*(l+1);
      jl=(cos(beta)*(1-(nu2-0.25)*(nu2-2.25)/(8*ax2)*(1-(nu2-6.25)*(nu2-12.25)/(48*ax2)))-
          sin(beta)*(nu2-0.25)/(2*ax)*(1-(nu2-2.25)*(nu2-6.25)/(24*ax2)*
                                       (1-(nu2-12.25)*(nu2-20.25)/(80*ax2))))/ax;
    }
    else {
      double l3=pow(nu,0.325);
      if(ax<nu-1.31*l3) {
        // Below the turning point: exponentially damped regime.
        double cosb=nu/ax;
        double sx=sqrt(nu2-ax2);
        double cotb=nu/sx;
        double secb=ax/nu;
        double beta=log(cosb+sx/ax);
        double cot3b=cotb*cotb*cotb;
        double cot6b=cot3b*cot3b;
        double sec2b=secb*secb;
        double expterm=((2+3*sec2b)*cot3b/24
                        -((4+sec2b)*sec2b*cot6b/16
                          +((16-(1512+(3654+375*sec2b)*sec2b)*sec2b)*cot3b/5760
                            +(32+(288+(232+13*sec2b)*sec2b)*sec2b)*sec2b*cot6b/(128*nu))*
                          cot6b/nu)/nu)/nu;
        jl=sqrt(cotb*cosb)/(2*nu)*exp(-nu*beta+nu/cotb-expterm);
      }
      else if(ax>nu+1.48*l3) {
        // Above the turning point: oscillatory regime.
        double cosb=nu/ax;
        double sx=sqrt(ax2-nu2);
        double cotb=nu/sx;
        double secb=ax/nu;
        double beta=acos(cosb);
        double cot3b=cotb*cotb*cotb;
        double cot6b=cot3b*cot3b;
        double sec2b=secb*secb;
        double trigarg=nu/cotb-nu*beta-0.25*M_PI-
          ((2+3*sec2b)*cot3b/24+(16-(1512+(3654+375*sec2b)*sec2b)*sec2b)*
           cot3b*cot6b/(5760*nu2))/nu;
        double expterm=((4+sec2b)*sec2b*cot6b/16-
                        (32+(288+(232+13*sec2b)*sec2b)*sec2b)*
                        sec2b*cot6b*cot6b/(128*nu2))/nu2;
        jl=sqrt(cotb*cosb)/nu*exp(-expterm)*cos(trigarg);
      }
      else {
        // Transition region around the turning point: Airy-type expansion
        // built from Gamma(1/3) and Gamma(2/3).
        double beta=ax-nu;
        double beta2=beta*beta;
        double sx=6/ax;
        double sx2=sx*sx;
        double secb=pow(sx,0.3333333333333333333333);
        double sec2b=secb*secb;
        jl=(CCL_GAMMA1*secb+beta*CCL_GAMMA2*sec2b
            -(beta2/18-1.0/45.0)*beta*sx*secb*CCL_GAMMA1
            -((beta2-1)*beta2/36+1.0/420.0)*sx*sec2b*CCL_GAMMA2
            +(((beta2/1620-7.0/3240.0)*beta2+1.0/648.0)*beta2-1.0/8100.0)*sx2*secb*CCL_GAMMA1
            +(((beta2/4536-1.0/810.0)*beta2+19.0/11340.0)*beta2-13.0/28350.0)*beta*sx2*sec2b*CCL_GAMMA2
            -((((beta2/349920-1.0/29160.0)*beta2+71.0/583200.0)*beta2-121.0/874800.0)*
              beta2+7939.0/224532000.0)*beta*sx2*sx*secb*CCL_GAMMA1)*sqrt(sx)/CCL_ROOTPI12;
      }
    }
  }
  // Parity: j_l(-x) = (-1)^l j_l(x), so flip the sign for odd l and x < 0.
  if((x<0)&&(l%2!=0))
    jl=-jl;

  return jl;
}

/* Integrates ny splines (rows of y, each sampled on the nx-point grid x)
 * over [a, b] into result[0..ny-1].
 * Conventions visible in the code:
 *  - b == a: all results are set to 0 and the function returns.
 *  - b < a: the limits are replaced by the full sampled range [x[0], x[nx-1]].
 *  - limits outside [x[0], x[nx-1]]: *status is set to CCL_ERROR_SPLINE and
 *    the function returns without touching result.
 * The per-row work is parallelized with OpenMP; each thread owns its spline
 * and accelerator, a failing row gets result[iy] = NAN, and the first error
 * code encountered is published to *status via an atomic write. */
void ccl_integ_spline(int ny, int nx,double *x,double **y,
                      double a, double b, double *result,
                      const gsl_interp_type *T, int *status)
{
  if(b==a) {
    int iyy;
    for(iyy=0; iyy<ny; iyy++)
      result[iyy]=0;
    return;
  }
  if(b<a) {
    b=x[nx-1];
    a=x[0];
  }

  if((b>x[nx-1]) || (a<x[0])) {
    ccl_raise_warning(CCL_ERROR_SPLINE, "ERROR: integration limits beyond interpolated range\n");
    *status = CCL_ERROR_SPLINE;
    return;
  }

  if(*status==0) {
#pragma omp parallel default(none) \
  shared(nx, ny, x, y, a, b, result, T, status)
    {
      int iy;
      int local_status=0;
      gsl_interp_accel *ia = NULL;
      gsl_spline *s = NULL;

      // Per-thread GSL workspace; GSL objects are not shareable across
      // threads.
      s = gsl_spline_alloc(T, nx);
      if(s == NULL)
        local_status = CCL_ERROR_MEMORY;

      if(!local_status) {
        ia = gsl_interp_accel_alloc();
        if(ia == NULL)
          local_status = CCL_ERROR_MEMORY;
      }

      if(!local_status) {
#pragma omp for
        for(iy=0; iy<ny; iy++) {
          // Once a thread hits an error it marks its remaining rows NAN-free
          // but skips further GSL calls (local_status stays set).
          if(!local_status) {
            if(gsl_spline_init(s, x, y[iy], nx)) {
              local_status = CCL_ERROR_SPLINE;
              result[iy] = NAN;
            }
          }
          if(!local_status) {
            int sstat = gsl_spline_eval_integ_e(s, a, b, ia, &(result[iy]));
            if(sstat) {
              local_status = CCL_ERROR_SPLINE_EV;
              result[iy] = NAN;
            }
          }
        }
      }

      gsl_spline_free(s);
      gsl_interp_accel_free(ia);

      if (local_status) {
        // Publish the thread-local error without a data race.
#pragma omp atomic write
        *status = local_status;
      }
    } //end omp parallel
  }
}
DRB046-doall2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two-dimensional array computation: Only one loop is associated with the omp for construct. 
The inner loop's loop iteration variable needs an explicit private() clause,
otherwise it will be shared by default. */

#include <stdio.h>
#include <stdlib.h>

/* 100x100 work array shared by all loops below. */
int a[100][100];

/* DataRaceBench kernel DRB046 (doall2, race-free variant): both i and j are
 * listed in private() on the combined parallel-for pragmas, so there is no
 * data race. Do not "simplify" the pragmas — their exact placement is the
 * point of this benchmark file. The cetus pragmas are auto-parallelizer
 * annotations, not OpenMP. */
int main()
{
  int i, j;
  int _ret_val_0;

  /* Phase 1: initialize a[i][j] = i*200 + j.
   * NOTE: the inner "#pragma omp parallel for" sits inside the outer
   * parallel region, i.e. it requests a nested parallel region (inactive by
   * default per the OpenMP spec unless nested parallelism is enabled). */
  #pragma cetus private(i, j)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i, j)
  for (i=0; i<100; i ++ )
  {
    #pragma cetus private(j)
    #pragma loop name main#0#0
    #pragma cetus parallel
    #pragma omp parallel for private(j)
    for (j=0; j<100; j ++ )
    {
      a[i][j]=((i*200)+j);
    }
  }

  /* Phase 2: increment every element by one, same parallel structure. */
  #pragma cetus private(i, j)
  #pragma loop name main#1
  #pragma cetus parallel
  #pragma omp parallel for private(i, j)
  for (i=0; i<100; i ++ )
  {
    #pragma cetus private(j)
    #pragma loop name main#1#0
    #pragma cetus parallel
    #pragma omp parallel for private(j)
    for (j=0; j<100; j ++ )
    {
      a[i][j]=(a[i][j]+1);
    }
  }

  /* Phase 3: sequential print of the result (no omp pragma here). */
  #pragma cetus private(i, j)
  #pragma loop name main#2
  for (i=0; i<100; i ++ )
  {
    #pragma cetus private(j)
    #pragma loop name main#2#0
    for (j=0; j<100; j ++ )
    {
      printf("%d", a[i][j]);
    }
  }
  _ret_val_0=0;
  return _ret_val_0;
}
orphaning.c
#include <stdio.h> #include <omp.h> int a[1000000]; int b[100]; int c[10]; int N = 1000000; int M = 100; int P = 10; int v1 =0; int v2 =0; void alpha() { for (int i = 0; i < P; ++i) { c[i] += 99; } } void fred() { #pragma omp for reduction(+: v1,v2) for (int i = 0; i < N; ++i) { a[i] += 99; v1+=1; v2+=1; } printf("v1: %d\n",v1); printf("v2: %d\n",v2); } void berg() { for (int i = 0; i < M; ++i) { b[i] += 99; } } int main(int argc, char const *argv[]) { /* code */ double start = omp_get_wtime(); #pragma omp parallel num_threads(4) { for (int i = 0; i < 100; ++i) { #pragma omp master { alpha(); } fred(); // this should be parallel #pragma omp master { berg(); // this remains on master thread } } } // 0.253465 // 4 threads: 0.091542 double end = omp_get_wtime(); // no parallel time: 0.048721 printf("%f\n", end - start); printf("%d\n", a[N - 1]); // should always be 9900 printf("%d\n", b[M - 1]); // should always be 9900 printf("%d\n", c[P - 1]); // should always be 9900 return 0; }
sample.c
#include <stdio.h>
#include <omp.h>

/*
 * Sums the thread numbers of every thread in the parallel region and prints
 * the total (0 + 1 + ... + nthreads-1).
 *
 * Bug fix: the original used "#pragma omp parallel private(sum)", which gave
 * each thread its own UNINITIALIZED copy of sum — the "sum +=" inside the
 * region read an indeterminate value (undefined behavior), and the outer sum
 * printed after the region was always the untouched 0. For the critical
 * section to serialize updates into a single accumulator, sum must be
 * shared; "shared(sum)" makes that explicit (it is also the default).
 */
int main(int argc, char *argv[]){
  int sum = 0;

  #pragma omp parallel shared(sum)
  {
    /* critical: only one thread at a time updates the shared accumulator */
    #pragma omp critical
    {
      sum += omp_get_thread_num();
    }
  }

  printf("sum = %d\n", sum);
  return 0;
}
residualbased_newton_raphson_mpc_contact_strategy.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Contact criteria #include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "utilities/atomic_utilities.h" // // Processes // #include "processes/fast_transfer_between_model_parts_process.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonMPCContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= 
LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonMPCContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace> SolvingStrategyType; typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonMPCContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType; typedef std::size_t IndexType; typedef std::size_t SizeType; /** * @brief Default constructor */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy() { } /** * @brief Default 
constructor. (with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence 
criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool 
CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonMPCContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename SolvingStrategyType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY BaseType::Predict(); // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // We get the system TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; // We solve the system in order to check the active set once TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); // Check active set const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel(); BaseType::mpConvergenceCriteria->SetEchoLevel(0); mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; // Computing nodal weights ComputeNodalWeights(); BaseType::Initialize(); KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. */ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. 
* @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { // Computing nodal weights ComputeNodalWeights(); BaseType::InitializeSolutionStep(); // // If enforcing NTN // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // EnforcingNTN(); // } } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; BaseType::FinalizeSolutionStep(); KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); if (r_process_info.Is(INTERACTION)) { // We get the system TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; int inner_iteration = 0; const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel(); while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) { KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; is_converged = AuxiliarSolveSolutionStep(); // We check the convergence if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); if 
(echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
    if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
    else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
    // No inner loop requested: a single auxiliar Newton-Raphson solve is performed
    is_converged = AuxiliarSolveSolutionStep();
}

return is_converged;

KRATOS_CATCH("");
}

/**
 * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. (auxiliar method)
 * @details Performs a full Newton-Raphson cycle: an initial build-and-solve followed by
 * corrective iterations until the convergence criteria are satisfied or the maximum
 * number of iterations is reached. The nodal weights (NODAL_PAUX/NODAL_MAUX) are
 * recomputed before every non-linear iteration via ComputeNodalWeights().
 * @return True if convergence was achieved, false otherwise
 */
bool AuxiliarSolveSolutionStep()
{
    // Getting flag INTERACTION
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool();
    VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions());

    // Pointers needed in the solution
    typename TSchemeType::Pointer p_scheme = this->GetScheme();
    typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver();
    auto& r_dof_set = p_builder_and_solver->GetDofSet();

    // References to the system matrix, correction vector and RHS
    TSystemMatrixType& rA = *BaseType::mpA;
    TSystemVectorType& rDx = *BaseType::mpDx;
    TSystemVectorType& rb = *BaseType::mpb;

    // Initializing the parameters of the Newton-Raphson cycle
    unsigned int iteration_number = 1;
    r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
    bool is_converged = false;
    bool residual_is_updated = false;

    // Computing nodal weights
    ComputeNodalWeights();

    p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
    BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
    is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

    // // If enforcing NTN
    // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
    // if (enforce_ntn) {
    //     EnforcingNTN();
    // }

    // Function to perform the building and the solving phase.
    if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
    } else {
        TSparseSpace::SetToZero(rDx); //Dx=0.00;
        TSparseSpace::SetToZero(rb);
        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
    }

    // Debugging info
    BaseType::EchoInfo(iteration_number);

    // Updating the results stored in the database
    BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

    p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
    BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

    // Calculate reactions if required
    if (BaseType::mCalculateReactionsFlag)
        p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

    if (is_converged) {
        if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
        }
        is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
    }

    // Iteration Cycle... performed only for NonLinearProblems
    while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) {
        // Setting the number of iteration
        r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;

        // Computing nodal weights
        ComputeNodalWeights();

        // Calling InitializeNonLinIteration
        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        // Shaping correctly the system
        if (update_each_nl_iteration) {
            p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
            p_builder_and_solver->SetUpSystem(r_model_part);
            p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part);
        }

        is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

        // Call the linear system solver to find the correction rDx (it is not called if there is no system to solve)
        if (SparseSpaceType::Size(rDx) != 0) {
            if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) {
                if (!BaseType::GetKeepSystemConstantDuringIterations()) {
                    //A = 0.00;
                    TSparseSpace::SetToZero(rA);
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                } else {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            } else {
                TSparseSpace::SetToZero(rDx);
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
            }
        } else {
            KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
        }

        // Debugging info
        BaseType::EchoInfo(iteration_number);

        // Updating the results stored in the database
        BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        residual_is_updated = false;

        // Calculate reactions if required
        if (BaseType::mCalculateReactionsFlag)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        if (is_converged) {
            if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                residual_is_updated = true;
            }
            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
        }
    }

    // Plots a warning if the maximum number of iterations is exceeded
    if (iteration_number >= BaseType::mMaxIterationNumber) {
        BaseType::MaxIterationsExceeded();
    } else {
        KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl;
    }

    // Recalculate residual if needed (note that some convergence criteria need it to be recalculated)
    if (!residual_is_updated) {
        // NOTE:
        // The following part will be commented because it is time consuming
        // and there is no obvious reason to be here. If someone need this
        // part please notify the community via mailing list before uncommenting it.
        // Pooyan.
// TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "newton_raphson_mpc_contact_strategy", "inner_loop_iterations" : 5, "update_each_nl_iteration" : false, "enforce_ntn" : false })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "newton_raphson_mpc_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); // Copy the parameters mThisParameters = ThisParameters; } ///@} ///@name Protected Access ///@{ ///@} 
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{
///@{

/**
 * Copy constructor.
 */
ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other)
{
};

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// /**
//  * @brief This enforces NTN formulation
//  */
// void EnforcingNTN()
// {
//     // List of enforced nodes to not repeat
//     std::unordered_set<IndexType> enforced_nodes;
//
//     // Getting contact model part
//     ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart();
//     ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact");
//
//     // The process info
//     const auto& r_process_info = r_root_model_part.GetProcessInfo();
//
//     // Reset the pointers of the conditions
//     for (auto& r_cond : r_computing_contact_model_part.Conditions()) {
//         if (r_cond.Has(CONSTRAINT_POINTER)) {
//             r_cond.SetValue(CONSTRAINT_POINTER, nullptr);
//         }
//     }
//
//     // Iterate over the constraints
//     IndexType counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
//
//     // Auxiliar classes
//     Matrix original_relation_matrix, relation_matrix;
//     Vector original_constant_vector, constant_vector;
//     ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs;
//
//     // Iterate over the constraints
//     for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) {
//         // Getting original system
//         r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info);
//         r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info);
//
//         // TODO: Finish rebuild
//
//         // Creating new constraint
//         r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector);
//
//         // Setting to remove the old constraints
//         r_const.Set(TO_ERASE, true);
//
//         ++counter;
//     }
//
//     // Remove old constraints
//     r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
//
//     // Transfer constraints from the root to the computing model part
//     FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute();
//
//     // Reorder ids
//     counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
// }

/**
 * @brief This computes the nodal weights
 * @details For every slave contact condition the method accumulates, per node,
 * the number of contributing conditions (NODAL_PAUX) and the lumped measure
 * (NODAL_MAUX = lumping factor * condition domain size). Both variables are
 * reset to zero first. The loop is parallelized with OpenMP and uses atomic
 * additions since several conditions may share a node.
 */
void ComputeNodalWeights()
{
    // Getting contact model part
    ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact");

    // Reset the NODAL_PAUX and NODAL_MAUX
    auto& r_nodes_array = r_contact_model_part.Nodes();
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array);
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array);

    // We set the constraints active and inactive in function of the active set
    auto& r_conditions_array = r_contact_model_part.Conditions();
    auto it_cond_begin = r_conditions_array.begin();

    // If enforcing NTN
    const bool enforce_ntn = false;
    // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
    // if (enforce_ntn) {
    //     VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array);
    // }

    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
        auto it_cond = it_cond_begin + i;

        // Only slave conditions
        if (it_cond->Is(SLAVE)) {
            auto& r_geometry = it_cond->GetGeometry();
            Vector lumping_factor;
            lumping_factor = r_geometry.LumpingFactors(lumping_factor);
            const double domain_size = r_geometry.DomainSize();
            for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) {
                auto& r_node = r_geometry[i_node];
                if (!enforce_ntn) {
                    AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0);
                }
                AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size);
            }
        }
    }
}

///@}
///@name Private Access
///@{

///@}

///@}
///@name Serialization
///@{

///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

///@}

}; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

///@}

} // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
munit.c
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE. */

/*** Configuration ***/

/* This is just where the output from the test goes. It's really just
 * meant to let you choose stdout or stderr, but if anyone really want
 * to direct it to a file let me know, it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif

/* This is a bit more useful; it tells µnit how to format the seconds in
 * timed tests. If your tests run for longer you might want to reduce
 * it, and if your computer is really fast and your tests are tiny you
 * can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif

/* If you have long test names you might want to consider bumping
 * this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif

/* If you don't like the timing information, you can disable it by
 * defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif

/*** End configuration ***/

#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif

/* Solaris freaks out if you try to use a POSIX or SUS standard without
 * the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
#  define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
#  define _XOPEN_SOURCE 600
# endif
#endif

/* Because, according to Microsoft, POSIX is deprecated. You've got
 * to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
#  define STDERR_FILENO _fileno(stderr)
# endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (true)'. I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

/* Current visibility threshold, and the level at which a message is
 * fatal (aborts the test or longjmps back to the harness). */
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Core logging routine: prints "Level: [file:line: ]message\n" to fp,
 * filtered by munit_log_level_visible. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

/* Internal printf-style logging wrapper (no file/line prefix). */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

/* Internal plain-message logging wrapper. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

/* Public logging entry point; messages at or above the fatal level
 * abort the current test. */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

/* Log an error and abort the current test unconditionally. */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...)
{
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif

/* Log "msg: strerror(errno) (errno)" to fp, using whichever
 * thread-safe strerror variant the platform provides. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/* calloc wrapper: returns zeroed memory, or logs a fatal error on
 * allocation failure; size == 0 yields NULL. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>. If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code. For
 * details, see the Creative Commons Zero 1.0 Universal license at
 * https://creativecommons.org/publicdomain/zero/1.0/
 */

#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H

#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif

#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
#  define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
#  define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif

enum PsnipClockType {
  /* This clock provides the current time, in units since 1970-01-01
   * 00:00:00 UTC not including leap seconds. In other words, UNIX
   * time. Keep in mind that this clock doesn't account for leap
   * seconds, and can go backwards (think NTP adjustments). */
  PSNIP_CLOCK_TYPE_WALL = 1,
  /* The CPU time is a clock which increases only when the current
   * process is active (i.e., it doesn't increment while blocking on
   * I/O). */
  PSNIP_CLOCK_TYPE_CPU = 2,
  /* Monotonic time is always running (unlike CPU time), but it only
     ever moves forward unless you reboot the system. Things like NTP
     adjustments have no effect on this clock. */
  PSNIP_CLOCK_TYPE_MONOTONIC = 3
};

struct PsnipClockTimespec {
  psnip_uint64_t seconds;
  psnip_uint64_t nanoseconds;
};

/* Methods we support: */

#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10

#include <assert.h>

#if defined(HEDLEY_UNREACHABLE)
# define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
# define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif

/* Choose an implementation */

/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */

/* We want to be able to detect the libc implementation, so we include
   <limits.h> (<features.h> isn't available everywhere). */

#if defined(__unix__) || defined(__unix) || defined(__linux__)
# include <limits.h>
# include <unistd.h>
#endif

#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)

/* These are known to work without librt. If you know of others
 * please let us know so we can add them. */
# if \
  (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
  (defined(__FreeBSD__))
#  define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# elif !defined(PSNIP_CLOCK_NO_LIBRT)
#  define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# endif
#endif

#if defined(_WIN32)
# if !defined(PSNIP_CLOCK_CPU_METHOD)
#  define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#  define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
# endif
#endif

#if defined(__MACH__) && !defined(__gnu_hurd__)
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#  define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
# endif
#endif

#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
# include <time.h>
# if !defined(PSNIP_CLOCK_WALL_METHOD)
#  if defined(CLOCK_REALTIME_PRECISE)
#   define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
#  elif !defined(__sun)
#   define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
#  endif
# endif
# if !defined(PSNIP_CLOCK_CPU_METHOD)
#  if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
#   define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
#  elif defined(CLOCK_VIRTUAL)
#   define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
#  endif
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#  if defined(CLOCK_MONOTONIC_RAW)
#   define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
#  elif defined(CLOCK_MONOTONIC_PRECISE)
#   define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
#  elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
#   define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#   define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
#  endif
# endif
#endif

#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
# if !defined(PSNIP_CLOCK_WALL_METHOD)
#  define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
# endif
#endif

#if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif

#if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif

/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
# error No monotonic clock found.
#endif

/* Implementations */

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
# include <time.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
# include <sys/time.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
# include <windows.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
# include <sys/time.h>
# include <sys/resource.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
# include <CoreServices/CoreServices.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
#endif

/*** Implementations ***/

#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
/* Ticks per second of the clock clk_id, derived from clock_getres();
 * returns 0 on failure. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
 */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  /* Dispatch to the backend selected for the requested clock type;
   * unknown enum values fall through to -1. */
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

/* Nanoseconds elapsed between two timespecs.  The branch handles the
 * case where `end`'s nanosecond field wrapped below `start`'s even
 * though `end` is later overall. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state.  It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias.  It's really not important this be
 * particularly strong, as long as it is fairly random it's much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproducible.
 */

/* Pick an atomics implementation for the 32-bit PRNG state: C11
 * stdatomic, clang's c_atomic extension, OpenMP critical sections,
 * GCC builtins, Win32 interlocked ops, or (last resort) nothing. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
# define HAVE_STDATOMIC
#elif defined(__clang__)
# if __has_extension(c_atomic)
# define HAVE_CLANG_ATOMICS
# endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
# undef HAVE_STDATOMIC
# if defined(__c2__)
# undef HAVE_CLANG_ATOMICS
# endif
#endif

#if defined(_OPENMP)
# define ATOMIC_UINT32_T uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
# include <stdatomic.h>
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
# define ATOMIC_UINT32_T volatile LONG
# define ATOMIC_UINT32_INIT(x) (x)
#else
# define ATOMIC_UINT32_T volatile uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#endif

/* Shared PRNG state; default seed 42 for reproducible runs. */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

#if defined(_OPENMP)
/* Under OpenMP, emulate the atomic ops with a named critical section. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
  *dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  int ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}

/* Compare-and-swap: returns true and writes `desired` only when *dest
 * still equals *expected. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  bool ret;

#pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = true;
    } else {
      ret = false;
    }
  }

  return ret;
}
#elif defined(HAVE_STDATOMIC)
# define munit_atomic_store(dest, value) atomic_store(dest, value)
# define munit_atomic_load(src) atomic_load(src)
# define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
# define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* NOTE(review): the condition below parses as
 * (defined(__GNUC__) && __GNUC__ > 4) || (__GNUC__ == 4 && minor >= 7);
 * harmless since an undefined __GNUC__ expands to 0 here, but explicit
 * parentheses would be clearer — confirm against upstream. */
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
# define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
# warning No atomic implementation, PRNG will not be thread-safe
# define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
/* Non-atomic fallback CAS; only correct single-threaded. */
static inline bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return true;
  } else {
    return false;
  }
}
#endif

/* PCG-style LCG constants. */
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* Advance the LCG state by one step. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

/* Output permutation: turn raw LCG state into the emitted value. */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

/* Reset the shared PRNG state from a caller-supplied seed (public API). */
void
munit_rand_seed(munit_uint32_t seed) {
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

/* Produce a seed for a fresh run: derived from the wall clock's
 * nanosecond field when timing is enabled, otherwise from time(). */
static munit_uint32_t
munit_rand_generate_seed(void) {
  munit_uint32_t seed, state;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wc = { 0, };
  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  seed = (munit_uint32_t) wc.nanoseconds;
#else
  seed = (munit_uint32_t) time(NULL);
#endif
  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  return munit_rand_from_state(state);
}

/* Draw the next value from an explicit, caller-owned state (no
 * shared-state synchronization needed). */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

/* Draw the next value from the shared state; CAS loop retries until
 * this thread's state advance wins the race (public API). */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t old, state;

  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return munit_rand_from_state(old);
}

/* Fill `data` with `size` random bytes, 4 bytes at a time with a
 * partial-word tail. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t* b = data;
  munit_uint32_t rv;
  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }
  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

/* Public wrapper around munit_rand_state_memory using the shared
 * state; the whole fill is retried if another thread raced us. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t old, state;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

/* Uniform value in [0, max] via rejection sampling; `salt` is XORed
 * into each draw so different call sites diverge. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the
   * same as (UINT32_MAX + 1 - max) % max = -max % max.  We compute -max
   * using not to avoid compiler warnings. */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}

/* Shared-state version of munit_rand_state_at_most (CAS retry loop). */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t old, state;
  munit_uint32_t retval;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/* Uniform int in [min, max]; arguments are swapped via recursion when
 * given in the wrong order, and the span is clamped to 32 bits. */
int
munit_rand_int_range(int min, int max) {
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

  if (min > max)
    return munit_rand_int_range(max, min);

  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));

  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Uniform double in [0, 1) (public API). */
double
munit_rand_double(void) {
  munit_uint32_t old, state;
  double retval = 0.0;

  do {
    state = old = munit_atomic_load(&munit_rand_state);

    /* See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right.  Patches welcome if you feel that this is too
     * biased.
 */
    retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/*** Test suite handling ***/

/* Per-test result tally (and accumulated timings when enabled). */
typedef struct {
  unsigned int successful;
  unsigned int skipped;
  unsigned int failed;
  unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
  munit_uint64_t cpu_clock;
  munit_uint64_t wall_clock;
#endif
} MunitReport;

/* All state threaded through a run: the suite under test, CLI-derived
 * settings, and the aggregate report. */
typedef struct {
  const char* prefix;
  const MunitSuite* suite;
  const char** tests;
  munit_uint32_t seed;
  unsigned int iterations;
  MunitParameter* parameters;
  bool single_parameter_mode;
  void* user_data;
  MunitReport report;
  bool colorize;
  bool fork;
  bool show_stderr;
  bool fatal_failures;
} MunitTestRunner;

/* Look up `key` in a NULL-terminated parameter array; NULL if absent
 * (public API). */
const char*
munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* param;

  for (param = params ; param != NULL && param->name != NULL ; param++)
    if (strcmp(param->name, key) == 0)
      return param->value;
  return NULL;
}

#if defined(MUNIT_ENABLE_TIMING)
/* Print a nanosecond count as seconds using the configured format. */
static void
munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC));
}
#endif

/* Add a parameter to an array of parameters.  The array is kept
 * NULL-terminated; on realloc failure MUNIT_ERROR is returned (the old
 * pointer is lost to the caller's variable, which is then only used
 * for cleanup). */
static MunitResult
munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (*params == NULL)
    return MUNIT_ERROR;

  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;

  return MUNIT_OK;
}

/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "".  Note the result may alias an
 * input — callers must free via munit_maybe_free_concat. */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    memcpy(res, prefix, prefix_l);
    memcpy(res + prefix_l, suffix, suffix_l);
    res[res_l] = 0;
  }

  if (len != NULL)
    *len = res_l;

  return res;
}

/* Possibly free a string returned by munit_maybe_concat (only when it
 * is a fresh allocation, not an aliased input). */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (prefix != s && suffix != s)
    free(s);
}

/* Cheap string hash function (djb2-style), just used to salt the PRNG. */
static munit_uint32_t
munit_str_hash(const char* name) {
  const char *p;
  munit_uint32_t h = 5381U;

  for (p = name; *p != '\0'; p++)
    h = (h << 5) + h + *p;

  return h;
}

/* Copy everything readable from fd `from` to fd `to` in 1 KiB chunks,
 * handling short writes; stops on EOF or any error. */
static void
munit_splice(int from, int to) {
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t bytes_written;
  ssize_t write_res;
#else
  int len;
  int bytes_written;
  int write_res;
#endif
  do {
    len = read(from, buf, sizeof(buf));
    if (len > 0) {
      bytes_written = 0;
      do {
        write_res = write(to, buf + bytes_written, len - bytes_written);
        if (write_res < 0)
          break;
        bytes_written += write_res;
      } while (bytes_written < len);
    } else
      break;
  } while (true);
}

/* This is the part that should be handled in the child process.  Runs
 * one test up to `iterations` times (setup/test/tear_down per
 * iteration), accumulating counts and timings into `report`; stops
 * early on the first non-OK result. */
static MunitResult
munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
  unsigned int iterations = runner->iterations;
  MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, };
  struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, };
#endif
  unsigned int i = 0;

  if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
    iterations = 1;
  else if (iterations == 0)
    iterations = runner->suite->iterations;
  /* Re-seed per test so each test sees a reproducible PRNG sequence. */
  munit_rand_seed(runner->seed);

  do {
    void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif

    result = test->test(params, data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif

    if (test->tear_down != NULL)
      test->tear_down(data);

    if (MUNIT_LIKELY(result == MUNIT_OK)) {
      report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
      /* Only successful iterations contribute to the timing totals. */
      report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
      report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
    } else {
      switch ((int) result) {
        case MUNIT_SKIP:
          report->skipped++;
          break;
        case MUNIT_FAIL:
          report->failed++;
          break;
        case MUNIT_ERROR:
          report->errored++;
          break;
        default:
          break;
      }
      /* Any non-OK result ends the iteration loop immediately. */
      break;
    }
  } while (++i < iterations);

  return result;
}

/* Five-character status labels printed inside the [ ... ] column. */
#if defined(MUNIT_EMOTICON)
# define MUNIT_RESULT_STRING_OK ":)"
# define MUNIT_RESULT_STRING_SKIP ":|"
# define MUNIT_RESULT_STRING_FAIL ":("
# define MUNIT_RESULT_STRING_ERROR ":o"
# define MUNIT_RESULT_STRING_TODO ":/"
#else
# define MUNIT_RESULT_STRING_OK "OK "
# define MUNIT_RESULT_STRING_SKIP "SKIP "
# define MUNIT_RESULT_STRING_FAIL "FAIL "
# define MUNIT_RESULT_STRING_ERROR "ERROR"
# define MUNIT_RESULT_STRING_TODO "TODO "
#endif

/* Print `string` wrapped in an ANSI color escape (color is the ASCII
 * digit of the SGR foreground code) when colorization is enabled. */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (runner->colorize)
    fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
  else
    fputs(string, MUNIT_OUTPUT_FILE);
}

#if !defined(MUNIT_NO_BUFFER)
/* Redirect STDERR into `stderr_buf` (a temp file); returns a dup of the
 * original stderr fd so it can be restored, or -1 if no buffer. */
static int
munit_replace_stderr(FILE* stderr_buf) {
  if (stderr_buf != NULL) {
    const int orig_stderr = dup(STDERR_FILENO);

    int errfd = fileno(stderr_buf);
    if (MUNIT_UNLIKELY(errfd == -1)) {
      exit(EXIT_FAILURE);
    }

    dup2(errfd, STDERR_FILENO);
    return orig_stderr;
  }

  return -1;
}

/* Undo munit_replace_stderr: restore the saved fd and close the dup. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr != -1) {
    dup2(orig_stderr, STDERR_FILENO);
    close(orig_stderr);
  }
}
#endif /* !defined(MUNIT_NO_BUFFER) */

/* Run a test with the specified parameters.  When forking is enabled
 * the test executes in a child process which writes its MunitReport
 * back over a pipe; otherwise it runs in-process (optionally guarded
 * by setjmp so assertion failures long-jump back here).  Afterwards
 * the aggregated result line is printed and buffered stderr is
 * replayed for failed (or --show-stderr) tests. */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
  MunitResult result = MUNIT_OK;
  MunitReport report = {
    0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
    0, 0
#endif
  };
  unsigned int output_l;
  bool first;
  const MunitParameter* param;
  FILE* stderr_buf;
#if !defined(MUNIT_NO_FORK)
  int pipefd[2];
  pid_t fork_pid;
  int orig_stderr;
  ssize_t bytes_written = 0;
  ssize_t write_res;
  ssize_t bytes_read = 0;
  ssize_t read_res;
  int status = 0;
  pid_t changed_pid;
#endif

  if (params != NULL) {
    /* Print the "name=value, ..." list, then pad out to the fixed
     * column width.
     * NOTE(review): output_l starts at 2 but the pad string printed
     * here is a single space in this copy — confirm against upstream
     * whether it should be two spaces. */
    output_l = 2;
    fputs(" ", MUNIT_OUTPUT_FILE);
    first = true;
    for (param = params ; param != NULL && param->name != NULL ; param++) {
      if (!first) {
        fputs(", ", MUNIT_OUTPUT_FILE);
        output_l += 2;
      } else {
        first = false;
      }
      output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
    }
    while (output_l++ < MUNIT_TEST_NAME_LEN) {
      fputc(' ', MUNIT_OUTPUT_FILE);
    }
  }

  fflush(MUNIT_OUTPUT_FILE);

  /* Buffer the test's stderr in a temp file so it can be replayed (or
   * discarded) once the result is known. */
  stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
  stderr_buf = tmpfile();
#else
  tmpfile_s(&stderr_buf);
#endif
  if (stderr_buf == NULL) {
    munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
    result = MUNIT_ERROR;
    goto print_result;
  }

#if !defined(MUNIT_NO_FORK)
  if (runner->fork) {
    pipefd[0] = -1;
    pipefd[1] = -1;
    if (pipe(pipefd) != 0) {
      munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
      result = MUNIT_ERROR;
      goto print_result;
    }

    fork_pid = fork();
    if (fork_pid == 0) {
      /* Child: run the test, then ship the raw report bytes back
       * through the pipe. */
      close(pipefd[0]);

      orig_stderr = munit_replace_stderr(stderr_buf);
      munit_test_runner_exec(runner, test, params, &report);

      /* Note that we don't restore stderr.  This is so we can buffer
       * things written to stderr later on (such as by
       * asan/tsan/ubsan, valgrind, etc.) */
      close(orig_stderr);

      do {
        write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
        if (write_res < 0) {
          if (stderr_buf != NULL) {
            munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
          }
          exit(EXIT_FAILURE);
        }
        bytes_written += write_res;
      } while ((size_t) bytes_written < sizeof(report));

      if (stderr_buf != NULL)
        fclose(stderr_buf);
      close(pipefd[1]);

      exit(EXIT_SUCCESS);
    } else if (fork_pid == -1) {
      close(pipefd[0]);
      close(pipefd[1]);
      if (stderr_buf != NULL) {
        munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
      }
      report.errored++;
      result = MUNIT_ERROR;
    } else {
      /* Parent: read the child's report, then reap it and translate
       * abnormal exits (signals, bad status) into errors. */
      close(pipefd[1]);
      do {
        read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
        if (read_res < 1)
          break;
        bytes_read += read_res;
      } while (bytes_read < (ssize_t) sizeof(report));

      changed_pid = waitpid(fork_pid, &status, 0);

      if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
        if (bytes_read != sizeof(report)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
          report.errored++;
        } else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
          report.errored++;
        }
      } else {
        if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
        } else if (WIFSTOPPED(status)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
        }
        report.errored++;
      }

      close(pipefd[0]);
      waitpid(fork_pid, NULL, 0);
    }
  } else
#endif
  {
    /* In-process path (no fork, or --no-fork). */
#if !defined(MUNIT_NO_BUFFER)
    /* volatile: must survive the setjmp/longjmp below. */
    const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif

#if defined(MUNIT_THREAD_LOCAL)
    if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
      result = MUNIT_FAIL;
      report.failed++;
    } else {
      munit_error_jmp_buf_valid = true;
      result = munit_test_runner_exec(runner, test, params, &report);
    }
#else
    result = munit_test_runner_exec(runner, test, params, &report);
#endif

#if !defined(MUNIT_NO_BUFFER)
    munit_restore_stderr(orig_stderr);
#endif

    /* Here just so that the label is used on Windows and we don't get
     * a warning */
    goto print_result;
  }

 print_result:

  fputs("[ ", MUNIT_OUTPUT_FILE);
  if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
    /* TODO tests invert success: failing is expected, passing is an
     * error. */
    if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
      result = MUNIT_OK;
    } else {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
      if (MUNIT_LIKELY(stderr_buf != NULL))
        munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
      runner->report.failed++;
      result = MUNIT_ERROR;
    }
  } else if (report.failed > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
    runner->report.failed++;
    result = MUNIT_FAIL;
  } else if (report.errored > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
    runner->report.errored++;
    result = MUNIT_ERROR;
  } else if (report.skipped > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
    runner->report.skipped++;
    result = MUNIT_SKIP;
  } else if (report.successful > 1) {
    /* Multiple iterations: print per-iteration averages plus totals. */
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
    fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  } else if (report.successful > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  }
  fputs(" ]\n", MUNIT_OUTPUT_FILE);

  if (stderr_buf != NULL) {
    /* Replay the captured stderr only for failures/errors or when
     * explicitly requested. */
    if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
      fflush(MUNIT_OUTPUT_FILE);
      rewind(stderr_buf);
      munit_splice(fileno(stderr_buf), STDERR_FILENO);
      fflush(stderr);
    }
    fclose(stderr_buf);
  }
}

/* Recursively expand the "wildcard" (unpinned) parameters, running
 * the test once per value of `p` combined with every expansion of the
 * parameters after it.  `params` is the full NULL-terminated array;
 * `p` points at the wildcard entry currently being filled in.
 * NOTE(review): the `p->name == pe->name` comparison is by pointer —
 * this relies on the wildcard entries sharing the test's own name
 * pointers; confirm against how run_test builds the array. */
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) {
  const MunitParameterEnum* pe;
  char** values;
  MunitParameter* next;

  for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
    if (p->name == pe->name)
      break;
  }

  if (pe == NULL)
    return;

  for (values = pe->values ; *values != NULL ; values++) {
    next = p + 1;
    p->value = *values;
    if (next->name == NULL) {
      munit_test_runner_run_test_with_params(runner, test, params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}

/* Run a single test, with every combination of parameters
 * requested.
 */
static void
munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI.  That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);

  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters.  Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = false;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = true;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from CLI, is the enum NULL/empty?  We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly.
       */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation.  Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      /* Seed each wildcard slot with its first value, then let
       * run_test_wild iterate through all combinations starting at
       * the first wildcard entry. */
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

  cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}

/* Recurse through the suite and run all the tests.  If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests.
 */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        /* Match when the requested name's prefix equals this suite's
         * accumulated prefix and the remainder prefixes the test name. */
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

 cleanup:

  munit_maybe_free_concat(pre, prefix, suite->prefix);
}

/* Entry point for a configured runner: walk the whole suite tree. */
static void
munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}

/* Print CLI usage, the version banner, and any custom arguments' help. */
static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;

  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
       " notation with no separators (commas, decimals, spaces, etc.), or\n"
       " hexidecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       " Run each test N times. 0 means the default number.\n"
       " --param name value\n"
       " A parameter key/value pair which will be passed to any test with\n"
       " takes a parameter of that name. If not provided, the test will be\n"
       " run once for each possible parameter value.\n"
       " --list Write a list of all available tests.\n"
       " --list-params\n"
       " Write a list of all available tests and their possible parameters.\n"
       " --single Run each parameterized test in a single configuration instead of\n"
       " every possible combination\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       " Set the level at which messages of different severities are visible,\n"
       " or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
       " --no-fork Do not execute tests in a child process. If this option is supplied\n"
       " and a test crashes (including by failing an assertion), no further\n"
       " tests will be performed.\n"
#endif
       " --fatal-failures\n"
       " Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       " Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       " Colorize (or don't) the output.\n"
       /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  /* Print the UTF-8 project name only when the locale can display it. */
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? "µnit" : "munit", stdout);
#else
  puts("munit");
#endif
  printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff);
  for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++)
    arg->write_help(arg, user_data);
}

/* Find a user-supplied CLI argument handler by name; NULL if absent. */
static const MunitArgument*
munit_arguments_find(const MunitArgument arguments[], const char* name) {
  const MunitArgument* arg;

  for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++)
    if (strcmp(arg->name, name) == 0)
      return arg;

  return NULL;
}

/* Print every test name (prefixed) in the suite tree; with
 * `show_params`, also list each parameter and its possible values. */
static void
munit_suite_list_tests(const MunitSuite* suite, bool show_params, const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const MunitParameterEnum* params;
  bool first;
  char** val;
  const MunitSuite* child_suite;

  for (test = suite->tests ; test != NULL && test->name != NULL ; test++) {
    if (pre != NULL)
      fputs(pre, stdout);
    puts(test->name);

    if (show_params) {
      for (params = test->parameters ; params != NULL && params->name != NULL ; params++) {
        fprintf(stdout, " - %s: ", params->name);
        if (params->values == NULL) {
          puts("Any");
        } else {
          first = true;
          for (val = params->values ; *val != NULL ; val++ ) {
            if(!first) {
              fputs(", ", stdout);
            } else {
              first = false;
            }
            fputs(*val, stdout);
          }
          putc('\n', stdout);
        }
      }
    }
  }

  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_suite_list_tests(child_suite, show_params, pre);
  }

  munit_maybe_free_concat(pre, prefix, suite->prefix);
}

/* Heuristic: should output to `stream` use ANSI color escapes?  On
 * POSIX, any TTY; on Windows, a TTY with the ANSICON variable set. */
static bool
munit_stream_supports_ansi(FILE *stream) {
#if !defined(_WIN32)
  return isatty(fileno(stream));
#else

#if !defined(__MINGW32__)
  size_t ansicon_size = 0;
#endif

  if (isatty(fileno(stream))) {
#if !defined(__MINGW32__)
    getenv_s(&ansicon_size, NULL, 0, "ANSICON");
    return ansicon_size != 0;
#else
    return getenv("ANSICON") != NULL;
#endif
  }
  return false;
#endif
}

/* Run a test suite from main().  Parses the command line, configures a
 * MunitTestRunner, runs the suite, and prints a summary.
 *
 * Recognized options (each consumes its listed operands):
 *   --seed <n>          PRNG seed (must fit in 32 bits)
 *   --iterations <n>    iterations per test (must fit in unsigned int)
 *   --param <name> <value>  force a parameter value (repeatable)
 *   --color always|never|auto
 *   --help, --single, --show-stderr, --fatal-failures, --list, --list-params
 *   --no-fork           (POSIX only)
 *   --log-visible <lvl> / --log-fatal <lvl>  with lvl in debug|info|warning|error
 * Unknown --options are looked up in the caller-supplied `arguments` array;
 * bare (non --) operands are collected as test names to run.
 *
 * Returns EXIT_SUCCESS when no test failed or errored (or for --help/--list),
 * EXIT_FAILURE otherwise. */
int munit_suite_main_custom(const MunitSuite* suite, void* user_data,
                            int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
                            const MunitArgument arguments[]) {
  int result = EXIT_FAILURE;
  MunitTestRunner runner;
  size_t parameters_size = 0;
  size_t tests_size = 0;
  int arg;

  char* envptr;
  unsigned long ts;
  char* endptr;
  unsigned long long iterations;
  MunitLogLevel level;
  const MunitArgument* argument;
  const char** runner_tests;
  unsigned int tests_run;
  unsigned int tests_total;

  /* Default runner configuration; overridden by the options below. */
  runner.prefix = NULL;
  runner.suite = NULL;
  runner.tests = NULL;
  runner.seed = 0;
  runner.iterations = 0;
  runner.parameters = NULL;
  runner.single_parameter_mode = false;
  runner.user_data = NULL;

  runner.report.successful = 0;
  runner.report.skipped = 0;
  runner.report.failed = 0;
  runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
  runner.report.cpu_clock = 0;
  runner.report.wall_clock = 0;
#endif

  runner.colorize = false;
#if !defined(_WIN32)
  runner.fork = true;   /* fork each test for isolation where available */
#else
  runner.fork = false;
#endif
  runner.show_stderr = false;
  runner.fatal_failures = false;
  runner.suite = suite;
  runner.user_data = user_data;
  runner.seed = munit_rand_generate_seed();
  runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);

  for (arg = 1 ; arg < argc ; arg++) {
    if (strncmp("--", argv[arg], 2) == 0) {
      if (strcmp("seed", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        /* Strict parse: reject trailing junk and values > 32 bits. */
        envptr = argv[arg + 1];
        ts = strtoul(argv[arg + 1], &envptr, 0);
        if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.seed = (munit_uint32_t) ts;

        arg++;
      } else if (strcmp("iterations", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        endptr = argv[arg + 1];
        iterations = strtoul(argv[arg + 1], &endptr, 0);
        if (*endptr != '\0' || iterations > UINT_MAX) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.iterations = (unsigned int) iterations;

        arg++;
      } else if (strcmp("param", argv[arg] + 2) == 0) {
        if (arg + 2 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
          goto cleanup;
        }

        /* Grow the NULL-terminated parameter list by one entry.
         * NOTE(review): assigning realloc's result directly to
         * runner.parameters leaks the old block if realloc fails; benign
         * here only because failure exits via cleanup immediately. */
        runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
        if (runner.parameters == NULL) {
          munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
          goto cleanup;
        }
        runner.parameters[parameters_size].name = (char*) argv[arg + 1];
        runner.parameters[parameters_size].value = (char*) argv[arg + 2];
        parameters_size++;
        runner.parameters[parameters_size].name = NULL;
        runner.parameters[parameters_size].value = NULL;
        arg += 2;
      } else if (strcmp("color", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "always") == 0)
          runner.colorize = true;
        else if (strcmp(argv[arg + 1], "never") == 0)
          runner.colorize = false;
        else if (strcmp(argv[arg + 1], "auto") == 0)
          runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        arg++;
      } else if (strcmp("help", argv[arg] + 2) == 0) {
        munit_print_help(argc, argv, user_data, arguments);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("single", argv[arg] + 2) == 0) {
        runner.single_parameter_mode = true;
      } else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
        runner.show_stderr = true;
#if !defined(_WIN32)
      } else if (strcmp("no-fork", argv[arg] + 2) == 0) {
        runner.fork = false;
#endif
      } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
        runner.fatal_failures = true;
      } else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
                 strcmp("log-fatal", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "debug") == 0)
          level = MUNIT_LOG_DEBUG;
        else if (strcmp(argv[arg + 1], "info") == 0)
          level = MUNIT_LOG_INFO;
        else if (strcmp(argv[arg + 1], "warning") == 0)
          level = MUNIT_LOG_WARNING;
        else if (strcmp(argv[arg + 1], "error") == 0)
          level = MUNIT_LOG_ERROR;
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        /* Same parser serves both options; dispatch on the option name. */
        if (strcmp("log-visible", argv[arg] + 2) == 0)
          munit_log_level_visible = level;
        else
          munit_log_level_fatal = level;

        arg++;
      } else if (strcmp("list", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, false, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("list-params", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, true, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else {
        /* Not a built-in option: delegate to the caller-supplied
         * custom-argument table. */
        argument = munit_arguments_find(arguments, argv[arg] + 2);
        if (argument == NULL) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
          goto cleanup;
        }

        if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      /* Bare operand: append to the NULL-terminated list of test names. */
      runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
            runner.report.successful, tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

/* Convenience wrapper: run a suite with no custom command-line arguments. */
int munit_suite_main(const MunitSuite* suite, void* user_data,
                     int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
sub_copy.c
// pmlib C++ test program based on stream.c by John McCalpin
# include <stdio.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Sentinel used to initialise the per-kernel minimum times.
 * NOTE: the original code unconditionally defined FLT_MAX, which would
 * collide with the standard macro from <float.h>; guard it instead.
 */
# ifndef FLT_MAX
# define FLT_MAX 1.0E+6
# endif

/* Array length.  If N >= 100M, the mcmodel compile option will be needed. */
# define N 50000000
/* Timed repetitions per kernel; the first (cold) run is excluded from stats. */
# define NTIMES 10
# define OFFSET 0

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

/* The three STREAM operand arrays (static storage, zero-initialised). */
static double a[N+OFFSET], b[N+OFFSET], c[N+OFFSET];

/* Per-kernel timing statistics, indexed 0=Copy, 1=Scale, 2=Add, 3=Triad. */
static double avgtime[4] = {0, 0, 0, 0},
              maxtime[4] = {0, 0, 0, 0},
              mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX};

static const char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel invocation, used to derive MB/s from the best time. */
static double bytes[4] = {
    2 * sizeof(double) * (double)N,
    2 * sizeof(double) * (double)N,
    3 * sizeof(double) * (double)N,
    3 * sizeof(double) * (double)N
};

extern double mysecond();

/*
 * Times NTIMES runs of the STREAM "Copy" kernel (c[j] = a[j]) and prints
 * label, MB/s (from the best run), and avg/min/max times.
 * Sets *flop_count to 0: a pure copy performs no floating-point arithmetic.
 */
void stream_copy(double * flop_count)
{
    int j, k;
    double times[NTIMES];

    k = 0;
#ifdef _OPENMP
    k = omp_get_max_threads();
#endif
    printf("Modified STREAM COPY, num_threads=%d, array size= %d\n", k, N);

    /* Initialise operands in parallel (matches the original behaviour). */
#pragma omp parallel for
    for (j=0; j<N; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    for (k=0; k<NTIMES; k++)
    {
        times[k] = mysecond();
#pragma omp parallel for
        for (j=0; j<N; j++)
            c[j] = a[j];
        times[k] = mysecond() - times[k];
    }

    (*flop_count) = 0.0;   /* copy moves data but does no FLOPs */

    /* Accumulate statistics, skipping the first (warm-up) iteration. */
    for (k=1; k<NTIMES; k++)
    {
        avgtime[0] = avgtime[0] + times[k];
        mintime[0] = MIN(mintime[0], times[k]);
        maxtime[0] = MAX(maxtime[0], times[k]);
    }

    avgtime[0] = avgtime[0]/(double)(NTIMES-1);
    printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[0],
           1.0E-06 * bytes[0]/mintime[0],
           avgtime[0], mintime[0], maxtime[0]);
}

/*
 * Times NTIMES runs of the STREAM "Triad" kernel (a[j] = b[j] + scalar*c[j])
 * and prints label, MB/s (from the best run), and avg/min/max times.
 * Sets *flop_count to NTIMES * N * 2 (one multiply + one add per element).
 */
void stream_triad(double * flop_count)
{
    int j, k;
    double scalar, times[NTIMES];

    k = 0;
#ifdef _OPENMP
    k = omp_get_max_threads();
#endif
    printf("Modified STREAM TRIAD, num_threads=%d, array size= %d\n", k, N);

#pragma omp parallel for
    for (j=0; j<N; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
    {
        times[k] = mysecond();
#pragma omp parallel for
        for (j=0; j<N; j++)
            a[j] = b[j]+scalar*c[j];
        times[k] = mysecond() - times[k];
    }

    (*flop_count) = (double)NTIMES * (double)N * 2;

    /* Accumulate statistics, skipping the first (warm-up) iteration. */
    for (k=1; k<NTIMES; k++)
    {
        avgtime[3] = avgtime[3] + times[k];
        mintime[3] = MIN(mintime[3], times[k]);
        maxtime[3] = MAX(maxtime[3], times[k]);
    }

    avgtime[3] = avgtime[3]/(double)(NTIMES-1);
    printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[3],
           1.0E-06 * bytes[3]/mintime[3],
           avgtime[3], mintime[3], maxtime[3]);
}

# define M 20

/*
 * Estimates the timer granularity: collects M strictly increasing
 * timestamps from mysecond() and returns the minimum positive delta
 * between consecutive ones, in microseconds.
 */
int checktick()
{
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

    /* Collect a sequence of M unique time values from the system. */
    for (i = 0; i < M; i++) {
        t1 = mysecond();
        while( ((t2=mysecond()) - t1) < 1.0E-6 )
            ;
        timesfound[i] = t1 = t2;
    }

    /*
     * Determine the minimum difference between these M values.
     * This result will be our estimate (in microseconds) for the
     * clock granularity.
     */
    minDelta = 1000000;
    for (i = 1; i < M; i++) {
        Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
        minDelta = MIN(minDelta, MAX(Delta,0));
    }

    return(minDelta);
}

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems. */
#ifdef __GNUC__
#define __USE_BSD 1
#endif
#include <sys/time.h>

double mysecond()
{
    struct timeval tp;

    /* Return value deliberately ignored; gettimeofday cannot meaningfully
       fail with a valid pointer on the targeted platforms. */
    (void) gettimeofday(&tp, (struct timezone *)0);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
mixed_tentusscher_myo_epi_2004_S1_18.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_18.h"

/* Report the model's resting potential and equation count to the framework.
 * (GET_CELL_MODEL_DATA / NEQ / INITIAL_V come from the model header.) */
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

/* Initialise the state vector `sv` of one cell.  `extra_data` must carry the
 * per-cell mask: mapping[sv_id]==0 selects myocardium initial conditions,
 * anything else selects epicardium.  Exits the program if no mask is given. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    static bool first_call = true;

    if(first_call) {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0) {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;  millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;  millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

/* Advance every requested cell by `num_steps` explicit steps of size `dt`,
 * dispatching each cell to the myocardium or epicardium right-hand side
 * according to the mask in `extra_data`.  Parallelised over cells. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

#pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j) {
            /* NOTE(review): the mask is indexed with the loop index `i`
             * while the state vector uses `sv_id`; when `cells_to_solve`
             * is non-NULL these can differ — confirm mapping[i] (and
             * stim_currents[i]) is the intended indexing here. */
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* One explicit time step for a myocardium cell: evaluate the RHS (which
 * returns the already-updated state in rDY) and copy it back into sv. */
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)  {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    /* RHS_cpu_myo writes NEW state values (not derivatives) into rDY. */
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

/* TenTusscher 2004 myocardium right-hand side.  Computes all membrane
 * currents, updates intracellular concentrations in-place (forward Euler),
 * updates the gates with the Rush-Larsen scheme, and stores the updated
 * state vector in rDY_ (rDY_[0] is V advanced one Euler step of size dt). */
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane voltage
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    /* Precomputed Rush-Larsen exponentials for the fCa and g gates. */
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    /* Analytic solution of the quadratic for buffered SR calcium. */
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    /* fCa and g may only move toward smaller values while depolarised. */
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler step)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

/* One explicit time step for an epicardium cell: evaluate the RHS (which
 * returns the already-updated state in rDY) and copy it back into sv. */
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)  {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    /* RHS_cpu_epi writes NEW state values (not derivatives) into rDY. */
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

/* TenTusscher 2004 epicardium right-hand side.  Same structure as
 * RHS_cpu_myo, but the conductances and SR release/leak constants are
 * overridden by a fitted parameter set ("parameters" below) specific to
 * this scenario. */
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane voltage
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    /* Scenario-specific fitted parameter set; overrides the defaults above.
     * Order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca,
     * Vmaxup, GpCa, arel, crel, Vleak. */
    real parameters []={14.5383636643555,0.000359007183612285,0.000154135859579797,0.000217532604523131,0.265156052763393,0.186639850277223,0.149365610424309,3.43320580539409,0.0166941723782826,1.45123160724562,1094.13527370174,0.000494385096732911,0.269171393030809,0.0183256017779276,0.00468024174172971,1.50869252254344e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    /* Precomputed Rush-Larsen exponentials for the fCa and g gates. */
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    /* SR release/leak use the fitted arel/crel/Vleak instead of the fixed
     * constants used in the myocardium RHS. */
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    /* fCa and g may only move toward smaller values while depolarised. */
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler step)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
GB_binop__ne_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_fp32)
// A*D function (colscale):         GB (_AxD__ne_fp32)
// D*A function (rowscale):         GB (_DxB__ne_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_fp32)
// C=scalar+B                       GB (_bind1st__ne_fp32)
// C=scalar+B'                      GB (_bind1st_tran__ne_fp32)
// C=A+scalar                       GB (_bind2nd__ne_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__ne_fp32)

// C type:   bool
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags set in GB_control.h)
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ne_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // #if 0: this accum kernel does not apply to the NE (bool-valued) op;
    // the stub succeeds without doing any work.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ne_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // #if 0: this accum kernel does not apply to the NE (bool-valued) op;
    // the stub succeeds without doing any work.
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion supplies alpha/beta fill scalars; plain eWiseAdd does not.
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// NOTE(review): this declaration is truncated by the chunk boundary; the
// remainder of the parameter list and body lie beyond this view.
GrB_Info GB (_AemultB_08__ne_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const
GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz 
; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ========================= task_set.c ========================= */
#include "communicator.h"
#include "task_set.h"

#ifdef __cplusplus
extern "C" {
#endif

// A task set hands out `ntask` tasks, each executed at most once across all
// participating threads.  tci_task_set_visit() returns 0 if the caller
// claimed and ran the task, EALREADY if another thread already claimed it,
// and EINVAL for an out-of-range task id.
//
// NOTE(review): valid task ids are 0..ntask-1 (see tci_task_set_visit_all),
// so the range checks below use `task >= set->ntask`.  The previous `>` let
// task == ntask through, indexing one past the end of the slot array.

#if TCI_USE_OPENMP_THREADS || TCI_USE_PTHREADS_THREADS || TCI_USE_WINDOWS_THREADS

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    set->comm = comm;
    set->ntask = ntask;

    // only the master thread allocates; tasks use slots[1..ntask]
    // (slots[0] is never initialized or claimed), hence ntask+1 entries
    if (tci_comm_is_master(comm))
    {
        set->slots = (tci_slot*)malloc((ntask+1)*sizeof(tci_slot));
        for (unsigned task = 0;task < ntask;task++)
            tci_slot_init(set->slots+task+1, 0);
    }

    // publish the slot array to the other threads of comm
    tci_comm_bcast(comm, (void**)&set->slots, 0);

    // choose an outer/inner thread split for the tasks
    // (presumably nt_outer gangs of nt_inner threads — confirm in tci docs)
    unsigned nt = comm->nthread;
    unsigned nt_outer, nt_inner;
    tci_partition_2x2(nt, work, (work == 0 ? 1 : nt), ntask, ntask,
                      &nt_inner, &nt_outer);

    tci_comm_gang(comm, &set->subcomm, TCI_EVENLY, nt_outer, 0);
}

void tci_task_set_destroy(tci_task_set* set)
{
    // all threads must arrive before the master frees the shared slots
    tci_comm_barrier(set->comm);
    tci_comm_destroy(&set->subcomm);
    if (tci_comm_is_master(set->comm))
        free((void*)set->slots);
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    // slot value gid+1 records which gang claimed the task (0 == unclaimed)
    if (!tci_slot_try_fill(set->slots+task+1, 0, set->subcomm.gid+1))
        return EALREADY;
    func(&set->subcomm, task, payload);
    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
    {
        int ret = tci_task_set_visit(set, func, task, payload);
        if (ret != 0) return ret;
    }

    return 0;
}

#elif TCI_USE_TBB_THREADS

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;

    // the comm field doubles as storage for the tbb::task_group
    set->comm = (tci_comm*)new tbb::task_group();
    set->ntask = ntask;
    set->slots = new tci_slot[ntask];
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}

void tci_task_set_destroy(tci_task_set* set)
{
    // wait for all spawned tasks before tearing down
    ((tbb::task_group*)set->comm)->wait();
    delete[] set->slots;
    delete (tbb::task_group*)set->comm;
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;

    ((tbb::task_group*)set->comm)->run(
    [&,func,task,payload]
    {
        func(tci_single, task, payload);
    });

    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
    {
        int ret = tci_task_set_visit(set, func, task, payload);
        if (ret != 0) return ret;
    }

    return 0;
}

#elif TCI_USE_OMPTASK_THREADS

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;

    set->ntask = ntask;
    set->slots = (tci_slot*)malloc(sizeof(tci_slot)*ntask);
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}

void tci_task_set_destroy(tci_task_set* set)
{
    // wait for outstanding omp tasks before freeing the slot array
    #pragma omp taskwait
    free((void*)set->slots);
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;

    // func/task/payload are firstprivate by default in the task region
    #pragma omp task
    {
        func(tci_single, task, payload);
    }

    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
    {
        int ret = tci_task_set_visit(set, func, task, payload);
        if (ret != 0) return ret;
    }

    return 0;
}

#elif TCI_USE_DISPATCH_THREADS

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;

    // comm/subcomm fields double as storage for the GCD group and queue
    *(dispatch_group_t*)&set->comm = dispatch_group_create();
    *(dispatch_queue_t*)&set->subcomm =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    set->ntask = ntask;
    set->slots = (tci_slot*)malloc(sizeof(tci_slot)*ntask);
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}

void tci_task_set_destroy(tci_task_set* set)
{
    dispatch_group_t group = *(dispatch_group_t*)&set->comm;
    dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
    dispatch_release(group);
    free((void*)set->slots);
}

typedef struct tci_task_func_data
{
    tci_task_func func;
    unsigned task;
    void* payload;
} tci_task_func_data;

static void tci_task_launcher(void* data_)
{
    tci_task_func_data* data = (tci_task_func_data*)data_;
    data->func(tci_single, data->task, data->payload);
    // context was heap-allocated by tci_task_set_visit; release it here
    free(data);
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;

    // NOTE(review): the context must outlive this call — the block runs
    // asynchronously.  The previous code passed the address of a stack
    // local, a use-after-return; allocate on the heap instead and free in
    // tci_task_launcher.
    tci_task_func_data* data =
        (tci_task_func_data*)malloc(sizeof(tci_task_func_data));
    if (!data) return ENOMEM; // task stays marked as claimed but never runs

    data->func = func;
    data->task = task;
    data->payload = payload;

    dispatch_group_t group = *(dispatch_group_t*)&set->comm;
    dispatch_queue_t queue = *(dispatch_queue_t*)&set->subcomm;
    dispatch_group_async_f(group, queue, data, tci_task_launcher);

    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
    {
        int ret = tci_task_set_visit(set, func, task, payload);
        if (ret != 0) return ret;
    }

    return 0;
}

#elif TCI_USE_PPL_THREADS

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;

    // the comm field doubles as storage for the PPL task_group
    set->comm = (tci_comm*)new concurrency::task_group();
    set->ntask = ntask;
    set->slots = new tci_slot[ntask];
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}

void tci_task_set_destroy(tci_task_set* set)
{
    ((concurrency::task_group*)set->comm)->wait();
    delete[] set->slots;
    delete (concurrency::task_group*)set->comm;
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;

    ((concurrency::task_group*)set->comm)->run(
    [&,func,task,payload]
    {
        func(tci_single, task, payload);
    });

    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
    {
        int ret = tci_task_set_visit(set, func, task, payload);
        if (ret != 0) return ret;
    }

    return 0;
}

#else // single threaded

void tci_task_set_init(tci_task_set* set, tci_comm* comm,
                       unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;

    // no slot bookkeeping needed: a single thread visits each task once
    set->ntask = ntask;
}

void tci_task_set_destroy(tci_task_set* set)
{
    (void)set;
}

int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    func(tci_single, task, payload);
    return 0;
}

int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    for (unsigned task = 0;task < set->ntask;task++)
        func(tci_single, task, payload);
    return 0;
}

#endif

#ifdef __cplusplus
}
#endif
/* ========================= GB_binop__iseq_int16.c ========================= */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function below is a thin, type-specialized wrapper that
// configures the GB_* macros and then #includes a shared template; the real
// kernel logic lives in the template files.  Any fix belongs in Generator/*.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_int16)
// A*D function (colscale):         GB (_AxD__iseq_int16)
// D*A function (rowscale):         GB (_DxB__iseq_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_int16)
// C=scalar+B                       GB (_bind1st__iseq_int16)
// C=scalar+B'                      GB (_bind1st_tran__iseq_int16)
// C=A+scalar                       GB (_bind2nd__iseq_int16)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_int16)

// C type:   int16_t
// A type:   int16_t
// A pattern? 0
// B type:   int16_t
// B pattern? 0

// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

// note: ISEQ returns the comparison result in the operand type (int16_t),
// unlike EQ which returns bool
#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time controls from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_INT16 || GxB_NO_ISEQ_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// disabled for this operator: ISEQ is not in the list of ops below, so the
// generator emits no ewise3-accum kernel (name "(none)" in the table above)
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for ISEQ (x == y is commutative), so only the
    // unflipped branch below is compiled
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB/GBX are entry-access macros from GB.h: GBB tests the bitmap
        // (always true when Bb is NULL), GBX loads the pth value
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to A's real type for the rest of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ========================= GB_binop__minus_int64.c ========================= */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): edits below are comments only; all code tokens are unchanged.
// Real changes must be made in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int64)
// A*D function (colscale):         GB (_AxD__minus_int64)
// D*A function (rowscale):         GB (_DxB__minus_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int64)
// C=scalar+B                       GB (_bind1st__minus_int64)
// C=scalar+B'                      GB (_bind1st_tran__minus_int64)
// C=A+scalar                       GB (_bind2nd__minus_int64)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int64)

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: the (i,j) position args are unused for MINUS
#define GB_BINOP(z, x, y, i, j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT64 || GxB_NO_MINUS_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body supplied entirely by the template (all dense; no disable check here)
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); harmless,
    // kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated/used inside the template; GB_FREE_WORK releases them
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (x - aij) ; \
}

GrB_Info GB (_bind1st_tran__minus_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (aij - y) ; \
}

GrB_Info GB (_bind2nd_tran__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
boxloop_cuda.h
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Header info for the BoxLoop
 *
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * BoxLoop macros:
 *
 * A BoxLoop*Begin macro opens a scope, flattens the (up to 3-D) loop into a
 * single 1-D range of length hypre__tot, and starts a HYPRE_LAMBDA body that
 * BoxLoopforall runs either on the host (serial/OpenMP) or on the device,
 * depending on the handle's struct execution policy.  The matching
 * BoxLoop*End macro closes the lambda and the scope — Begin/End must always
 * be paired.
 *--------------------------------------------------------------------------*/

#ifndef HYPRE_BOXLOOP_CUDA_HEADER
#define HYPRE_BOXLOOP_CUDA_HEADER

#if (defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)) && !defined(HYPRE_USING_RAJA) && !defined(HYPRE_USING_KOKKOS)

/* capture-by-value lambda compiled for both host and device */
#define HYPRE_LAMBDA [=] __host__ __device__

/* TODO: RL: support 4-D */
typedef struct hypre_Boxloop_struct
{
   HYPRE_Int lsize0, lsize1, lsize2;       /* loop extent per dimension */
   HYPRE_Int strides0, strides1, strides2; /* loop stride per dimension */
   HYPRE_Int bstart0, bstart1, bstart2;    /* loop start relative to box imin */
   HYPRE_Int bsize0, bsize1, bsize2;       /* imax - imin; IncK uses bsize+1
                                              as the box dimension */
} hypre_Boxloop;

#ifdef __cplusplus
extern "C++" {
#endif

/* -------------------------
 * parfor-loop
 * ------------------------*/

/* One thread per index; guarded because the grid may overshoot 'length'. */
template <typename LOOP_BODY>
__global__ void
forall_kernel( LOOP_BODY loop_body,
               HYPRE_Int length )
{
   const HYPRE_Int idx = hypre_cuda_get_grid_thread_id<1, 1>();
   /* const HYPRE_Int number_threads = hypre_cuda_get_grid_num_threads<1,1>(); */

   if (idx < length)
   {
      loop_body(idx);
   }
}

/* Run loop_body(idx) for idx in [0, length): on the host (OpenMP if
 * enabled) or on the device, per the handle's struct execution policy. */
template<typename LOOP_BODY>
void
BoxLoopforall( HYPRE_Int length,
               LOOP_BODY loop_body )
{
   HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle());

   if (exec_policy == HYPRE_EXEC_HOST)
   {
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (HYPRE_Int idx = 0; idx < length; idx++)
      {
         loop_body(idx);
      }
   }
   else if (exec_policy == HYPRE_EXEC_DEVICE)
   {
      const dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
      const dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim);

      HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length );
   }
}

/* ------------------------------
 * parforreduction-loop
 * -----------------------------*/

/* Grid-stride loop (idx advances by the total number of threads in the
 * grid), each thread folding into its copy of 'reducer'. */
template <typename LOOP_BODY, typename REDUCER>
__global__ void
reductionforall_kernel( HYPRE_Int length,
                        REDUCER reducer,
                        LOOP_BODY loop_body )
{
   const HYPRE_Int thread_id = hypre_cuda_get_grid_thread_id<1, 1>();
   const HYPRE_Int n_threads = hypre_cuda_get_grid_num_threads<1, 1>();

   for (HYPRE_Int idx = thread_id; idx < length; idx += n_threads)
   {
      loop_body(idx, reducer);
   }

   /* reduction in block-level and the save the results in reducer */
   reducer.BlockReduce();
}

/* Reduction counterpart of BoxLoopforall; no-op when length <= 0.
 * On the device, caps the grid at 1024 blocks and records the block count
 * in reducer.nblocks for the final (per-block) combine. */
template<typename LOOP_BODY, typename REDUCER>
void
ReductionBoxLoopforall( HYPRE_Int  length,
                        REDUCER   &reducer,
                        LOOP_BODY  loop_body )
{
   if (length <= 0)
   {
      return;
   }

   HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle());

   if (exec_policy == HYPRE_EXEC_HOST)
   {
      /* NOTE(review): intentionally serial here (no OpenMP pragma), unlike
       * BoxLoopforall — presumably because 'reducer' is not thread-safe on
       * the host; confirm before parallelizing. */
      for (HYPRE_Int idx = 0; idx < length; idx++)
      {
         loop_body(idx, reducer);
      }
   }
   else if (exec_policy == HYPRE_EXEC_DEVICE)
   {
      const dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
      dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim);

      /* Note: we assume gDim cannot exceed 1024
       *       and bDim < WARP * WARP
       */
      gDim.x = hypre_min(gDim.x, 1024);
      reducer.nblocks = gDim.x;

      /*
      hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", length, bDim.x, gDim.x);
      */

      HYPRE_CUDA_LAUNCH( reductionforall_kernel, gDim, bDim, length, reducer, loop_body );
   }
}

#ifdef __cplusplus
}
#endif

/* Get 1-D length of the loop, in hypre__tot */
#define hypre_newBoxLoopInit(ndim, loop_size) \
   HYPRE_Int hypre__tot = 1; \
   for (HYPRE_Int hypre_d = 0; hypre_d < ndim; hypre_d ++) \
   { \
      hypre__tot *= loop_size[hypre_d]; \
   }

/* Initialize struct for box-k.  Missing dimensions get extent 1 and
 * stride/start/size 0 so the flattening math below stays valid. */
#define hypre_BoxLoopDataDeclareK(k, ndim, loop_size, dbox, start, stride) \
   hypre_Boxloop databox##k; \
   /* dim 0 */ \
   databox##k.lsize0   = loop_size[0]; \
   databox##k.strides0 = stride[0]; \
   databox##k.bstart0  = start[0] - dbox->imin[0]; \
   databox##k.bsize0   = dbox->imax[0] - dbox->imin[0]; \
   /* dim 1 */ \
   if (ndim > 1) \
   { \
      databox##k.lsize1   = loop_size[1]; \
      databox##k.strides1 = stride[1]; \
      databox##k.bstart1  = start[1] - dbox->imin[1]; \
      databox##k.bsize1   = dbox->imax[1] - dbox->imin[1]; \
   } \
   else \
   { \
      databox##k.lsize1   = 1; \
      databox##k.strides1 = 0; \
      databox##k.bstart1  = 0; \
      databox##k.bsize1   = 0; \
   } \
   /* dim 2 */ \
   if (ndim == 3) \
   { \
      databox##k.lsize2   = loop_size[2]; \
      databox##k.strides2 = stride[2]; \
      databox##k.bstart2  = start[2] - dbox->imin[2]; \
      databox##k.bsize2   = dbox->imax[2] - dbox->imin[2]; \
   } \
   else \
   { \
      databox##k.lsize2   = 1; \
      databox##k.strides2 = 0; \
      databox##k.bstart2  = 0; \
      databox##k.bsize2   = 0; \
   }

/* Box-free variant: strides only, start/size zeroed. */
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
   hypre_Boxloop databox##k; \
   databox##k.lsize0   = loop_size[0]; \
   databox##k.strides0 = stride[0]; \
   databox##k.bstart0  = 0; \
   databox##k.bsize0   = 0; \
   if (ndim > 1) \
   { \
      databox##k.lsize1   = loop_size[1]; \
      databox##k.strides1 = stride[1]; \
      databox##k.bstart1  = 0; \
      databox##k.bsize1   = 0; \
   } \
   else \
   { \
      databox##k.lsize1   = 1; \
      databox##k.strides1 = 0; \
      databox##k.bstart1  = 0; \
      databox##k.bsize1   = 0; \
   } \
   if (ndim == 3) \
   { \
      databox##k.lsize2   = loop_size[2]; \
      databox##k.strides2 = stride[2]; \
      databox##k.bstart2  = 0; \
      databox##k.bsize2   = 0; \
   } \
   else \
   { \
      databox##k.lsize2   = 1; \
      databox##k.strides2 = 0; \
      databox##k.bstart2  = 0; \
      databox##k.bsize2   = 0; \
   }

/* RL: TODO loop_size out of box struct, bsize +1 */

/* Given input 1-D 'idx' in box, get 3-D 'local_idx' in loop_size
 * (mixed-radix decomposition of idx over lsize0 x lsize1 x lsize2). */
#define hypre_newBoxLoopDeclare(box) \
   hypre_Index local_idx; \
   HYPRE_Int idx_local = idx; \
   hypre_IndexD(local_idx, 0)  = idx_local % box.lsize0; \
   idx_local = idx_local / box.lsize0; \
   hypre_IndexD(local_idx, 1)  = idx_local % box.lsize1; \
   idx_local = idx_local / box.lsize1; \
   hypre_IndexD(local_idx, 2)  = idx_local % box.lsize2; \

/* Given input 3-D 'local_idx', get 1-D 'hypre__i' in 'box' */
#define hypre_BoxLoopIncK(k, box, hypre__i) \
   HYPRE_Int hypre_boxD##k = 1; \
   HYPRE_Int hypre__i = 0; \
   hypre__i += (hypre_IndexD(local_idx, 0) * box.strides0 + box.bstart0) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
   hypre__i += (hypre_IndexD(local_idx, 1) * box.strides1 + box.bstart1) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
   hypre__i += (hypre_IndexD(local_idx, 2) * box.strides2 + box.bstart2) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);

/* get 3-D local_idx into 'index' */
#define hypre_BoxLoopGetIndex(index) \
   index[0] = hypre_IndexD(local_idx, 0); \
   index[1] = hypre_IndexD(local_idx, 1); \
   index[2] = hypre_IndexD(local_idx, 2);

/* BoxLoop 0: no boxes, bare 1-D loop body */
#define hypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   {

#define hypre_newBoxLoop0End() \
   }); \
}

/* BoxLoop 1: one box; i1 is the flat index into dbox1 */
#define hypre_newBoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1);

#define hypre_newBoxLoop1End(i1) \
   }); \
}

/* BoxLoop 2 */
#define hypre_newBoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
                                                dbox2, start2, stride2, i2) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
   hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1); \
      hypre_BoxLoopIncK(2, databox2, i2);

#define hypre_newBoxLoop2End(i1, i2) \
   }); \
}

/* BoxLoop 3 */
#define hypre_newBoxLoop3Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
                                                dbox2, start2, stride2, i2, \
                                                dbox3, start3, stride3, i3) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim,loop_size, dbox1, start1, stride1); \
   hypre_BoxLoopDataDeclareK(2, ndim,loop_size, dbox2, start2, stride2); \
   hypre_BoxLoopDataDeclareK(3, ndim,loop_size, dbox3, start3, stride3); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1); \
      hypre_BoxLoopIncK(2, databox2, i2); \
      hypre_BoxLoopIncK(3, databox3, i3);

#define hypre_newBoxLoop3End(i1, i2, i3) \
   }); \
}

/* BoxLoop 4 */
#define hypre_newBoxLoop4Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
                                                dbox2, start2, stride2, i2, \
                                                dbox3, start3, stride3, i3, \
                                                dbox4, start4, stride4, i4) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
   hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
   hypre_BoxLoopDataDeclareK(3, ndim, loop_size, dbox3, start3, stride3); \
   hypre_BoxLoopDataDeclareK(4, ndim, loop_size, dbox4, start4, stride4); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1); \
      hypre_BoxLoopIncK(2, databox2, i2); \
      hypre_BoxLoopIncK(3, databox3, i3); \
      hypre_BoxLoopIncK(4, databox4, i4);

#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
   }); \
}

/* Basic BoxLoops have no boxes (closed with the ordinary *End macros) */

/* BoxLoop 1 */
#define zypre_newBasicBoxLoop1Begin(ndim, loop_size, stride1, i1) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1);

/* BoxLoop 2 */
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, stride1, i1, stride2, i2) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \
   zypre_BasicBoxLoopDataDeclareK(2, ndim, loop_size, stride2); \
   BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1); \
      hypre_BoxLoopIncK(2, databox2, i2); \

/* TODO: RL just parallel-for, it should not be here, better in utilities */
#define hypre_LoopBegin(size, idx) \
{ \
   BoxLoopforall(size, HYPRE_LAMBDA (HYPRE_Int idx) \
   {

#define hypre_LoopEnd() \
   }); \
}

/* Reduction BoxLoop1: lambda also receives 'reducesum' by reference */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
   ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1);

#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
   }); \
}

/* Reduction BoxLoop2 */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \
                                                      dbox2, start2, stride2, i2, reducesum) \
{ \
   hypre_newBoxLoopInit(ndim, loop_size); \
   hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
   hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
   ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \
   { \
      hypre_newBoxLoopDeclare(databox1); \
      hypre_BoxLoopIncK(1, databox1, i1); \
      hypre_BoxLoopIncK(2, databox2, i2);

#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
   }); \
}

/* Renamings */
#define hypre_BoxLoopBlock()       0
#define hypre_BoxLoop0Begin        hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For          hypre_newBoxLoop0For
#define hypre_BoxLoop0End          hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin        hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For          hypre_newBoxLoop1For
#define hypre_BoxLoop1End          hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin        hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For          hypre_newBoxLoop2For
#define hypre_BoxLoop2End          hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin        hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For          hypre_newBoxLoop3For
#define hypre_BoxLoop3End          hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin        hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For          hypre_newBoxLoop4For
#define hypre_BoxLoop4End          hypre_newBoxLoop4End

#define hypre_BasicBoxLoop1Begin   zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin   zypre_newBasicBoxLoop2Begin

#endif

#endif /* #ifndef HYPRE_BOXLOOP_CUDA_HEADER */
l1_kernel.c
/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://github.com/anlsys/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

/*
 * This is a benchmark for the BLAS Level 1 operations for AML.
 *
 * All vector kernels share the signature
 *     double f(size_t n, double *a, double *b, double *c, double scalar)
 * so they can be dispatched uniformly; parameters a kernel does not use are
 * explicitly voided.
 */

#include "blas/l1_kernel.h"

/* Look into another way to define these */
#define sign(a) ((a > 0) ? 1 : ((a < 0) ? -1 : 0))

/* Sum of absolute values: returns sum_i |a[i]|.  (Serial; NOTE(review):
 * could use an OpenMP reduction like ddot — confirm intent.) */
double dasum(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*b;
	(void)*c;
	(void)scalar;

	size_t i;
	double dasum = 0;

	for (i = 0; i < n; i++) {
		dasum = dasum + fabs(a[i]);
	}
	return dasum;
}

/* c = b + scalar * a; returns 1 (no meaningful scalar result). */
double daxpy(size_t n, double *a, double *b, double *c, double scalar)
{
	size_t i;

#pragma omp parallel for
	for (i = 0; i < n; i++)
		c[i] = b[i] + scalar * a[i];
	return 1;
}

/* b = a; returns 1. */
double dcopy(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*c;
	(void)scalar;

	size_t i;

#pragma omp parallel for
	for (i = 0; i < n; i++)
		b[i] = a[i];
	return 1;
}

/* Dot product sum_i a[i]*b[i]; accumulates in long double for accuracy. */
double ddot(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*c;
	(void)scalar;

	size_t i;
	long double dot = 0.0;

#pragma omp parallel for reduction(+ : dot)
	for (i = 0; i < n; i++) {
		long double temp;

		temp = a[i] * b[i];
		dot += temp;
	}
	return (double)dot;
}

/* Euclidean norm of a, using the scaled-sum-of-squares update to avoid
 * overflow/underflow (as in the reference BLAS dnrm2). */
double dnrm2(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*b;
	(void)*c;
	(void)scalar;

	size_t i;
	double scale, ssq, temp;

	scale = 0.0;
	ssq = 1.0;
	for (i = 0; i < n; i++) {
		if (a[i] != 0.0) {
			temp = fabs(a[i]);
			if (scale < temp) {
				ssq = 1.0 + ssq * pow(scale / temp, 2);
				scale = temp;
			} else
				ssq = ssq + pow(temp / scale, 2);
		}
	}
	return scale * sqrt(ssq);
}

/* b = scalar * a; returns 1.  (Out-of-place variant of BLAS dscal.) */
double dscal(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*c;

	size_t i;

#pragma omp parallel for
	for (i = 0; i < n; i++)
		b[i] = scalar * a[i];
	return 1;
}

/* Exchange a and b element-wise; returns 1. */
double dswap(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*c;
	(void)scalar;

	size_t i;

#pragma omp parallel for
	for (i = 0; i < n; i++) {
		double temp = a[i];

		a[i] = b[i];
		b[i] = temp;
	}
	return 1;
}

/* Index of the first element of maximum absolute value (BLAS idamax),
 * returned as a double.  Returns 0 for n <= 1. */
double idmax(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)*b;
	(void)*c;
	(void)scalar;

	/* also guards n == 0: the old 'n == 1' test let n == 0 read a[0] */
	if (n <= 1)
		return 0;

	size_t i;
	size_t id_max = 0;
	/* BUGFIX: compare |a[i]| against |a[0]|, not the signed a[0];
	 * with a[0] < 0 the old code could report a wrong index. */
	double max = fabs(a[0]);

	for (i = 1; i < n; i++) {
		if (fabs(a[i]) > max) {
			id_max = i;
			max = fabs(a[i]);
		}
	}
	return id_max;
}

/* The rotations. Not included in the array of functions because of their
   parameters */

/* Plane rotation: apply [x y; -y x] to the pairs (a[i], b[i]). */
void drot(size_t n, double *a, double *b, double x, double y)
{
	size_t i;

#pragma omp parallel for
	for (i = 0; i < n; i++) {
		double temp = x * a[i] + y * b[i];

		b[i] = x * b[i] - y * a[i];
		a[i] = temp;
	}
}

/* Create a plane rotation (BLAS drotg).  TODO: Verify
 * NOTE(review): c, s, x, y are passed by value, so the computed rotation
 * never reaches the caller; the reference BLAS drotg takes pointers.
 * Signature kept unchanged for compatibility — confirm intended use. */
void drotg(double x, double y, double c, double s)
{
	double r, roe, scale, z;

	roe = y;
	if (fabs(x) > fabs(y))
		roe = x;
	scale = fabs(x) + fabs(y);
	if (scale == 0.0) {
		c = 1.0;
		s = 0.0;
		r = 0.0;
		z = 0.0;
	} else {
		r = scale * sqrt(pow(x / scale, 2) + pow(y / scale, 2));
		r = sign(roe) * r;
		c = x / r;
		s = y / r;
		z = 1.0;
		if (fabs(x) > fabs(y))
			z = s;
		if (fabs(y) >= fabs(x) && c != 0.0)
			z = 1.0 / c;
	}
	x = r;
	y = z;
}

/* Apply the modified Givens rotation H (encoded in param, as produced by
 * drotmg) to the pairs (a[i], b[i]).  param[0] selects the H layout. */
void drotm(size_t n, double *a, double *b, double *param)
{
	double flag, h11, h12, h21, h22;
	size_t i;

	flag = param[0];

	/* BUGFIX: flag == -2.0 encodes H = identity (reference BLAS drotm);
	 * previously it fell into the flag < 0 branch and applied whatever
	 * happened to be in param[1..4]. */
	if (flag == -2.0)
		return;

	if (flag < 0.0) {
		h11 = param[1];
		h12 = param[3];
		h21 = param[2];
		h22 = param[4];
	} else {
		if (flag == 0) {
			h11 = 1.0;
			h12 = param[3];
			h21 = param[2];
			h22 = 1.0;
		} else {
			h11 = param[1];
			h12 = 1.0;
			h21 = -1.0;
			h22 = param[4];
		}
	}
#pragma omp parallel for
	for (i = 0; i < n; i++) {
		double w = a[i];
		double z = b[i];

		a[i] = w * h11 + z * h12;
		b[i] = w * h21 + z * h22;
	}
}

/* Construct the modified Givens rotation (BLAS drotmg), writing the H
 * encoding into param[0..4].  TODO: Verify
 * NOTE(review): d1, d2, x, y are passed by value, so the updated scaling
 * factors never reach the caller (reference drotmg takes pointers);
 * signature kept unchanged for compatibility. */
void drotmg(double d1, double d2, double x, double y, double *param)
{
	double flag, h11, h12, h21, h22, p1, p2, q1, q2, temp, u, gam, gamsq,
	        rgamsq;

	gam = 4096.0;
	gamsq = 16777216.0;
	rgamsq = 5.9604645e-8;

	/* default initialization */
	h11 = 0.0;
	h12 = 0.0;
	h21 = 0.0;
	h22 = 0.0;

	if (d1 < 0) {
		flag = -1.0;
		d1 = 0.0;
		d2 = 0.0;
		x = 0.0;
	} else {
		p2 = d2 * y;
		if (p2 == 0) {
			/* BUGFIX: flag -2 means H = identity; record it and
			 * stop.  The old code fell through and the later
			 * branches overwrote flag with a bogus rotation. */
			param[0] = -2.0;
			return;
		}
		p1 = d1 * x;
		q2 = p2 * y;
		q1 = p1 * x;
		if (fabs(q1) > fabs(q2)) {
			h21 = -y / x;
			h12 = p2 / p1;
			u = 1.0 - h12 * h21;
			if (u > 0) {
				flag = 0.0;
				d1 = d1 / u;
				d2 = d2 / u;
				x = x * u;
			}
		} else {
			if (q2 < 0.0) {
				flag = -1.0;
				d1 = 0.0;
				d2 = 0.0;
				x = 0.0;
			} else {
				flag = 1.0;
				h11 = p1 / p2;
				h22 = x / y;
				u = 1.0 + h11 * h22;
				temp = d2 / u;
				d2 = d1 / u;
				d1 = temp;
				x = y * u;
			}
		}
		/* rescale d1 into [rgamsq, gamsq) to avoid over/underflow */
		if (d1 != 0.0) {
			while (fabs(d1) <= rgamsq || d1 >= gamsq) {
				if (flag == 0.0) {
					h11 = 1.0;
					h22 = 1.0;
				} else {
					h21 = -1.0;
					h12 = 1.0;
				}
				flag = -1.0;
				if (d1 <= rgamsq) {
					d1 = d1 * pow(gam, 2);
					x = x / gam;
					h11 = h11 / gam;
					h12 = h12 / gam;
				} else {
					d1 = d1 / pow(gam, 2);
					x = x * gam;
					h11 = h11 * gam;
					h12 = h12 * gam;
				}
			}
		}
		/* same rescaling for d2 */
		if (d2 != 0) {
			while (fabs(d2) <= rgamsq || fabs(d2) >= gamsq) {
				if (flag == 0.0) {
					h11 = 1.0;
					h22 = 1.0;
				} else {
					h21 = -1.0;
					h12 = 1.0;
				}
				flag = -1.0;
				if (fabs(d2) <= rgamsq) {
					d2 = d2 * pow(gam, 2);
					h21 = h21 * gam;
					h22 = h22 * gam;
				} else {
					d2 = d2 / pow(gam, 2);
					h21 = h21 * gam;
					h22 = h22 * gam;
				}
			}
		}
	}
	param[1] = h11;
	param[2] = h21;
	param[3] = h12;
	param[4] = h22;
	param[0] = flag;
}
matmult_omp_explicit.c
/* Matrix multiplication example OpenMP version, explicit domain decomposition Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College Sun Feb 23 18:54:41 EST 2003 Updated for CSIS-335, Siena College, Fall 2021 */ /* header files needed for printf, gettimeofday, struct timeval */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> /* header file for our own timer.c function diffgettime */ #include "timer.h" /* we will multiply square matrices, how big? */ #define SIZE 1500 /* our matrices */ double a[SIZE][SIZE], b[SIZE][SIZE], c[SIZE][SIZE]; /* function to compute the result of row row in c */ void do_row(int row) { int col, k; for (col=0; col<SIZE; col++) { /* initialize entry */ c[row][col] = 0; /* perform dot product */ for(k=0; k<SIZE; k++) { c[row][col] = c[row][col] + a[row][k]*b[k][col]; } } } /* this function will be called by each thread created. Note that we could instead have put all of this code in a block under the #pragma omp parallel, but all of the local variables would have had to be included in a private clause. 
*/ void worker() { int thread_num, num_threads, row, myrows, extrarows, startrow, finishrow; /* query OpenMP for number of threads and thread id */ thread_num = omp_get_thread_num(); num_threads = omp_get_num_threads(); /* some extra computation to deal with the fact that the number of threads might not be evenly divide the number of rows -- we will assign an extra row, as necessary, to higher numbered threads */ myrows = SIZE/num_threads; extrarows = SIZE%num_threads; /* start out with the assumption that none of our predecessors have gotten an extra row */ startrow = myrows*thread_num; /* see if this thread or any of its predecessors have an extra row */ if (extrarows >= num_threads-thread_num) { /* compensate for extra rows in predecessors */ startrow += extrarows - (num_threads-thread_num); /* this thread gets an extra row, too */ myrows++; } /* now that we know our real starting row and number of rows, we can figure our last row */ finishrow = startrow + myrows -1; printf("Worker %d will compute %d rows: %d-%d\n", thread_num, myrows, startrow, finishrow); for (row=startrow; row<=finishrow; row++) do_row(row); } /* it's a simple program for now, we'll just put everything in main */ int main(int argc, char *argv[]) { /* counters */ int i, j, k; double sum; /* to pass to gettimeofday to get wall clock times */ struct timeval start, stop; /* initialize and allocate matrices, just fill with junk */ gettimeofday(&start, NULL); for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { a[i][j] = i+j; b[i][j] = i-j; } } gettimeofday(&stop, NULL); printf("Initialization took: %f seconds\n", diffgettime(start,stop)); gettimeofday(&start, NULL); /* matrix-matrix multiply */ #pragma omp parallel worker(); /* this is called by each thread */ /* there is an implied barrier here -- the master thread cannot continue until it and all other threads have completed the worker() call. 
*/ gettimeofday(&stop, NULL); printf("Multiplication took: %f seconds\n", diffgettime(start,stop)); /* This is here to make sure the optimizing compiler doesn't get any big ideas about "optimizing" code away completely */ sum=0; for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { sum += c[i][j]; } } printf("Sum of elements of c=%f\n", sum); return 0; }
ike_fmt_plug.c
/* PSK cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> . * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and it is hereby released to the general public under GPL * * The IKE Scanner (ike-scan) is Copyright (C) 2003-2007 Roy Hills, * NTA Monitor Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library, and distribute linked combinations including the two. * * You must obey the GNU General Public License in all respects * for all of the code used other than OpenSSL. If you modify * file(s) with this exception, you may extend this exception to your * version of the file(s), but you are not obligated to do so. If you * do not wish to do so, delete this exception statement from your * version. * * If this license is unacceptable to you, I may be willing to negotiate * alternative licenses (contact ike-scan@nta-monitor.com). * * You are encouraged to send comments, improvements or suggestions to * me at ike-scan@nta-monitor.com. 
 *
 * psk-crack.c -- IKE Aggressive Mode Pre-Shared Key cracker for ike-scan
 *
 * Author: Roy Hills
 * Date: 8 July 2004
 *
 * July, 2012, JimF small changes made, many more should be done.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_ike;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ike);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "ike-crack.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE               16
static int omp_t = 1;
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "IKE"
#define FORMAT_NAME             "PSK"
#define ALGORITHM_NAME          "HMAC MD5/SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        32
#define BINARY_SIZE             20 /* SHA1 */
#define BINARY_SIZE_SMALLER     16 /* MD5 */
#define SALT_SIZE               sizeof(psk_entry)
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_ALIGN              sizeof(size_t)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      16

/* Self-test vectors: '*'-separated hex fields captured from an IKE
   aggressive-mode exchange, with the expected plaintext PSK. */
static struct fmt_tests ike_tests[] = {
	{"$ike$*0*5c7916ddf8db4d233b3b36005bb3ccc115a73807e11a897be943fd4a2d0f942624cb00588d8b3a0a26502b73e639df217ef6c4cb90f96b0a3c3ef2f62ed025b4a705df9de65e33e380c1ba5fa23bf1f9911bbf388d0844256fa0131fc5cf8acb396936ba3295b4637b039d93f58db90a3a1cf1ef5051103bacf6e1a3334f9f89*fde8c68c5f324c7dbcbadde1d757af6962c63496c009f77cad647f2997fd4295e50821453a6dc2f6279fd7fef68768584d9cee0da6e68a534a097ce206bf77ecc798310206f3f82d92d02c885794e0a430ceb2d6b43c2aff45a6e14c6558382df0692ff65c2724eef750764ee456f31424a5ebd9e115d826bbb9722111aa4e01*b2a3c7aa4be95e85*756e3fa11c1b102c*00000001000000010000002c01010001000000240101000080010001800200018003000180040002800b0001000c000400007080*01000000ac100202*251d7ace920b17cb34f9d561bca46d037b337d19*e045819a64edbf022620bff3efdb935216584cc4*b9c594fa3fca6bb30a85c4208a8df348", "abc123"},
	{"$ike$*0*9bdee7aa341cf1a6c19bc0191106b5056537ce6b837cd70678ea5a3ccb606b56dee4548feb67f24fd6f4d5f58967a9ff3c674d9d79e4195b7def5aac147c9fe9abdc2f8ba2eca58f4c863fedc7a8c8e1ad6e1551b1e44bf9a0e258561a5db1c2ca1e8b5dfda1b012012b6fdf24ecd07da6b10d76ab3b58d07b30b4f9da26aee4*c9b7ef0610a22b3e1c88b1a01ce4d4110edf6baa122ed1285eb2184cd75d30a11520a725c2d263de5a157f77f953880732f3b14521836d7f3585cb0ce3fcadf81c541dde2680bd81953cf88e8f8096c173470694ca7414fff9df0cdcdbb9d4f70ef1d6347293b507cfad965e2d2c1fa07326353e9a493d93284970040344fb11*3506592130312567*6c362583ce7a2a26*00000001000000010000002c01010001000000240101000080010001800200028003000180040002800b0001000c000400007080*01000000ac100202*84943233f42a0b5a9b33c327162fe0efee2545e4*76f451dce3fea6402b67f3fddae561ebdb4a6efe*f63f237b3c0f1fe57a5b852203cfd27cbf0c78d4", "abc123"},
	{NULL}
};

/* Current salt (shared by all threads of crypt_all). */
static psk_entry *cur_salt;
/* Candidate passwords, one NUL-terminated string per index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Computed hashes, one BINARY_SIZE buffer per index. */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Allocate per-candidate buffers; scale key counts for OpenMP. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Return nonzero iff the whole string consists of hex digits.
   NOTE(review): an empty string also returns 1 here. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate a "$ike$*<0|1>*f1*f2*...*f9" ciphertext: nine '*'-separated
   hex fields, each at most MAXLEN chars. Works on a strdup'd copy
   because strtok modifies its argument. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;
	if (strncmp(ciphertext, "$ike$*", 6))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;
	ctcopy += 6;	/* skip leading '$ike$*' */
	if (*ctcopy != '0' && *ctcopy != '1')
		goto error;
	/* skip '*0' */
	ctcopy += 1;
	if (*ctcopy != '*')
		goto error;
	ctcopy += 1;
	/* field 1 */
	if (!(ptr = strtok(ctcopy, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 2 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 3 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 4 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 5 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 6 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 7 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 8 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	/* field 9 */
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishex(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse the ciphertext into a psk_entry salt structure.
   ciphertext[6] is the nortel flag, fields start at offset 8. */
static void *get_salt(char *ciphertext)
{
	static psk_entry cs;
	cs.isnortel = atoi(&ciphertext[6]);
	load_psk_params(&ciphertext[8], NULL, &cs);
	return (void *)&cs;
}

/* Decode the last '*'-separated field (the expected hash) from hex.
   Only BINARY_SIZE_SMALLER (MD5-sized) bytes are decoded. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE_SMALLER; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors used by the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (psk_entry *)salt;
}

/* Hash all queued candidate keys against the current salt.
   Parallel over candidates under OpenMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		compute_hash(cur_salt, saved_key[index], (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick check: does any candidate's first word match the binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return (*((ARCH_WORD_32*)binary) == crypt_out[index][0]);
}

/* Full comparison over the MD5-sized prefix. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_SMALLER);
}

/* Store a candidate key, truncated to PLAINTEXT_LENGTH. */
static void ike_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * For ike, the hash algorithm used for hmac
 * is returned as the first "tunable cost":
 * 1: MD5
 * 2: SHA1
 *
 * However, there is almost no difference in speed,
 * so if the different hash types for HMAC shouldn't be reported,
 * just define IKE_REPORT_TUNABLE_COSTS to be 0 instead of 1.
 */
#define IKE_REPORT_TUNABLE_COSTS 1

#if FMT_MAIN_VERSION > 11 && IKE_REPORT_TUNABLE_COSTS
static unsigned int tunable_cost_hmac_hash_type(void *salt)
{
	psk_entry *my_salt;
	my_salt = salt;
	return (unsigned int) my_salt->hash_type;
}
#endif

/* Format descriptor registered with the John core. */
struct fmt_main fmt_ike = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_SMALLER,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
#if IKE_REPORT_TUNABLE_COSTS
			"hash algorithm used for hmac [1:MD5 2:SHA1]",
#else
			NULL
#endif
		},
#endif
		ike_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
#if IKE_REPORT_TUNABLE_COSTS
			tunable_cost_hmac_hash_type,
#else
			NULL
#endif
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		ike_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
parallelLUFactorization-pipeline.c
// C Program to decompose a matrix into
// lower and upper triangular matrix
// (pipelined OpenMP variant: thread k passes the current pivot row
// number to thread k+1 through per-thread linked-list queues)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <omp.h>

int n = 4;

// One queued message: 'val' is the payload, 'prod' the producer id.
struct record_s {
    double val;
    long prod;
    struct record_s* next;
};

// Singly-linked FIFO, one per consumer thread.
struct buf_list {
    struct record_s* head_p;
    struct record_s* tail_p;
};

// NOTE(review): fixed capacity of 4 queues/flags -- Put(threadID+1)
// below can index past this when more threads are used; verify against
// the thread count passed to luDecomposition.
struct buf_list buff[4];
int producers_done[4];

// Pop the head record of queue 'thread'; NULL when empty.
// NOTE(review): the malloc on the first line is immediately
// overwritten (or abandoned on the empty-queue path) -- it leaks on
// every call and should simply be removed.
struct record_s* Dequeue(long thread) {
    struct record_s* rec_p = malloc(sizeof(*rec_p));
    if (buff[thread].head_p == NULL) {
        return NULL;
    } else if (buff[thread].head_p == buff[thread].tail_p) {
        rec_p = buff[thread].head_p;
        buff[thread].head_p = buff[thread].tail_p = NULL;
    } else {
        rec_p = buff[thread].head_p;
        buff[thread].head_p = buff[thread].head_p->next;
    }
    return rec_p;
}

// Blocking consume from queue 'thread': busy-waits until a record is
// available, then returns its value (the record is freed).
// NOTE(review): producers_done is incremented on *every* Put, so the
// "< 1" test only distinguishes "nothing ever produced"; confirm that
// matches the intended termination condition.
double Get(long thread) {
    struct record_s* rec_p;
    double data;
    while (producers_done[thread] < 1 || buff[thread].head_p != NULL) {
        #pragma omp critical (queue)
        {
            rec_p = Dequeue(thread);
        }
        if (rec_p != NULL) {
            data = rec_p -> val;
            free(rec_p);
            return data;
        }
    }
    return 0.;
}

// Allocate a record carrying 'data' tagged with the producer id.
struct record_s* Create_record(long thread, double data) {
    struct record_s* rec_p = malloc(sizeof(*rec_p));
    rec_p->next=NULL;
    rec_p->prod=thread;
    rec_p->val=data;
    return rec_p;
}

// Append a record to queue 'thread' (caller must hold the queue lock).
void Enqueue(long thread, struct record_s* rec_p) {
    if (buff[thread].tail_p == NULL) {
        buff[thread].head_p = rec_p;
    } else {
        buff[thread].tail_p->next = rec_p;
    }
    buff[thread].tail_p = rec_p;
}

// Produce 'data' into queue 'thread' and bump its done-counter.
void Put(long thread, double data) {
    struct record_s *rec_p;
    rec_p = Create_record(thread,data);
    #pragma omp critical(queue)
    {
        Enqueue(thread, rec_p);
    }
    #pragma omp critical(done)
    producers_done[thread]++;
}

// print matrix to file
void printNxNMatrix(double** matrix, int n, FILE *fp) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            fprintf(fp, "%lf ", matrix[i][j]);
        }
        fprintf(fp, "\n");
    }
}

// Multiply lower*upper (m1 x n1 times n1 x n2) and compare against
// 'matrix'; returns 1 on match, 0 otherwise (dumping the product).
// NOTE(review): res_matrix rows are malloc'd, not calloc'd, yet the
// inner loop does '+=' -- the accumulators start from garbage.
// NOTE(review): '#pragma omp for' must be followed directly by a for
// statement; the '{' block here is not conforming OpenMP. The explicit
// barrier is also redundant (the for construct already ends with one).
// NOTE(review): llabs of the truncated difference compared to 0.0001
// effectively tests integer-part equality only; 'x' is unused;
// res_matrix is never freed.
int multiply(int m1, int m2, double** lower, int n1, int n2, double** upper, double** matrix, int T, FILE *fp) {
    int x, i, j;
    double** res_matrix = (double**)malloc(m1*sizeof(double*));
    for (i = 0; i<m1; i++){
        res_matrix[i] = (double*)malloc(n2*sizeof(double));
    }
    int ii;
    #pragma omp parallel num_threads(T)
    {
        #pragma omp for
        {
            for (ii=0; ii < m1; ii++) {
                int jj, kk;
                for (jj=0; jj < n2; jj++){
                    for (kk=0; kk<n1; kk++){
                        res_matrix[ii][jj] += (lower[ii][kk]) * (upper[kk][jj]);
                    }
                }
            }
        }
        #pragma omp barrier
    }
    for (i = 0; i < m1; i++) {
        for (j = 0; j < n2; j++) {
            if(llabs((long long)matrix[i][j] - (long long)res_matrix[i][j]) > 0.0001){
                printNxNMatrix(res_matrix, n, fp);
                return 0;
            }
        }
    }
    return 1;
}

// Pipelined LU decomposition of 'matrix' (n x n) using p threads.
// Thread 0 eliminates its own block and pushes the pivot index k to
// thread 1; each later thread consumes the pivot index, forwards it,
// and updates its own block of rows. Results are written to
// lower_matrix.txt / upper_matrix.txt and verified via multiply().
void luDecomposition(double** matrix, int n, int p) {
    // allocate and copy the input into both factors
    double** lower = (double**)malloc(n*sizeof(double*));
    double** upper = (double**)malloc(n*sizeof(double*));
    for (int i = 0; i<n; i++){
        lower[i] = (double*)malloc((n)*sizeof(double));
        upper[i] = (double*)malloc((n)*sizeof(double));
    }
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n ; j++){
            lower[i][j] = matrix[i][j];
            upper[i][j] = matrix[i][j];
        }
    }
    int i,j,k,row;
    omp_set_num_threads(p);
    // Decomposing matrix into Upper and Lower
    // triangular matrix
    // NOTE(review): 't' is set but never read (timing incomplete).
    double t = omp_get_wtime();
    #pragma omp parallel private(k, i, j, row) shared(matrix, lower, upper) num_threads(p)
    {
        long threadID = omp_get_thread_num();
        int blockSize = n/p;
        if (threadID != 0) {
            for (k = 0; k < n-1; k++) {
                // receive the pivot row index and forward it downstream
                // NOTE(review): Get returns double, 'row' is int
                // (silent truncation); Put(threadID+1) on the last
                // thread writes buff[p], out of bounds when p == 4.
                row = Get(threadID);
                Put(threadID+1, row);
                for (i = threadID * blockSize; i < threadID * blockSize + blockSize; i++) {
                    if (row < i) {
                        lower[i][row] = upper[i][row] / upper[row][row];
                    }
                }
                for (i = threadID * blockSize; i < threadID * blockSize + blockSize; i++) {
                    for (j = k; j < n; j++) {
                        if (row < i) {
                            upper[i][j] = upper[i][j] - lower[i][row] * upper[row][j];
                        }
                    }
                }
            }
        // thread 0
        } else {
            for (k = 0; k < n-1; k++) {
                // announce pivot k to thread 1, then eliminate within
                // thread 0's own block of rows
                Put(threadID+1,k);
                for (i = k+1; i < threadID * blockSize + blockSize; i++) {
                    lower[i][k] = upper[i][k]/upper[k][k];
                }
                for (i = k+1; i < threadID * blockSize + blockSize; i++) {
                    for (j = k+1; j < n; j++) {
                        upper[i][j] = upper[i][j] - lower[i][k] * upper[k][j];
                    }
                }
            }
        }
    }
    // zero the strict triangles and set unit diagonal on lower
    for (int j = 0; j < n; j++) {
        for(int i = 0; i < n; i++){
            if(i > j){
                upper[i][j] = 0.;
            }else{
                lower[i][j] = 0.;
            }
        }
        lower[j][j] = 1.;
    }
    // putting lower matrix into file
    FILE *fp;
    fp = fopen("lower_matrix.txt", "w");
    printNxNMatrix(lower, n, fp);
    fclose(fp);
    // putting upper matrix into file
    fp = fopen("upper_matrix.txt", "w");
    printNxNMatrix(upper, n, fp);
    fclose(fp);
    // check correctness
    fp = fopen("multiplication_result.txt","w");
    if (multiply(n, n, lower, n, n, upper, matrix, 4, fp)){
        printf("Pass the test.\n");
    }
    fclose(fp);
}

// Driver code: reads n and an n x n matrix from file_six_six.txt,
// then runs the 2-thread pipelined decomposition.
// NOTE(review): no checks on fopen/fscanf results; matrix never freed.
int main() {
    int i, j;
    FILE *fp;
    // read in file1
    fp = fopen("file_six_six.txt", "r");
    fscanf(fp, "%i", &(n));
    double** matrix = (double**)malloc(n*sizeof(double*));
    for (i = 0; i<n; i++){
        matrix[i] = (double*)malloc((n)*sizeof(double));
    }
    for (i = 0; i < n; ++i) {
        for (j = 0;j < n; ++j) {
            fscanf(fp, "%lf", &matrix[i][j]);
        }
    }
    fclose(fp);
    luDecomposition(matrix, n, 2);
    return 0;
}
stochqn.c
/* Stochastic limited-memory Quasi-Newton optimization Methods for smooth stochastic optimization of both convex and non-convex functions, using search directions computed by an approximated inverse Hessian-vector product, which is obtained through limited-memory BFGS recursive formula. The implementations are based on the following works: * Byrd, R.H., Hansen, S.L., Nocedal, J. and Singer, Y., 2016. "A stochastic quasi-Newton method for large-scale optimization." SIAM Journal on Optimization, 26(2), pp.1008-1031. (SQN) * Schraudolph, N.N., Yu, J. and Günter, S., 2007, March. "A stochastic quasi-Newton method for online convex optimization." In Artificial Intelligence and Statistics (pp. 436-443). (oLBFGS) * Keskar, N.S. and Berahas, A.S., 2016, September. "adaQN: An Adaptive Quasi-Newton Algorithm for Training RNNs." In Joint European Conference on Machine Learning and Knowledge Discovery in Databases (pp. 1-16). Springer, Cham. (adaQN) * Wright, S. and Nocedal, J., 1999. "Numerical optimization." (ch 7) Springer Science, 35(67-68), p.7. (L-BFGS two-loop recursion, and correction pairs based on gradient differences) Written for C99 standard with fixes for compilation with OpenMP 2.0 (e.g. MSVC). BSD 2-Clause License Copyright (c) 2020, David Cortes All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Standard headers */ #include <stdlib.h> #include <string.h> #include <stddef.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifndef _FOR_R #include <stdio.h> #endif /* Library header */ #include "stochqn.h" /* BLAS functions */ #ifdef _FOR_PYTON #include "findblas.h" /* https://www.github.com/david-cortes/findblas */ #elif defined(_FOR_R) #include "blas_R.h" #include <R_ext/Print.h> #define fprintf(f, message) REprintf(message) #else #include "blasfuns.h" #endif /* --------------- Preprocessor definitions --------------- */ /* Aliasing for compiler optimizations */ #ifdef __cplusplus #if defined(__GNUG__) || defined(__GNUC__) || defined(_MSC_VER) || defined(__clang__) || defined(__INTEL_COMPILER) #define restrict __restrict #else #define restrict #endif #elif defined(_MSC_VER) #define restrict __restrict #elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) #define restrict #endif /* In-lining for faster calls */ #ifndef __cplusplus #if defined(_MSC_VER) #define inline __inline #elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) #define inline #endif #endif /* OpenMP < 3.0 (e.g. 
MSVC as of 2019) does not support parallel for's with unsigned iterators,
   and does not support declaring the iterator type in the loop itself */
#ifdef _OPENMP
	#if (_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64) /* OpenMP < 3.0 */
		#define size_t_for
	#else
		#define size_t_for size_t
	#endif
#else
	#define size_t_for size_t
#endif

#ifndef isnan
	#ifdef _isnan
		#define isnan _isnan
	#else
		#define isnan(x) ( (x) != (x) )
	#endif
#endif
#ifndef isinf
	#ifdef _finite
		#define isinf(x) (!_finite(x))
	#else
		#define isinf(x) ( (x) >= HUGE_VAL || (x) <= -HUGE_VAL )
	#endif
#endif

/* Any code mentioning 'x_avg' is textually operating on the x_sum
   buffer: this is to keep track of when the sum array has been divided */
#define x_avg x_sum
#define min2(a, b) (((a) < (b))? (a) : (b))

/* --------------- End of preprocessor definitions --------------- */

#ifdef __cplusplus
extern "C" {
#endif

/* --------------- General-purpose helpers --------------- */

/* Copy 'n' elements from src to dest, chunked across threads.
   Note: don't use BLAS dcopy as it's actually much slower */
static inline void copy_arr(const real_t *restrict src, real_t *restrict dest, const int n, const int nthreads)
{
	#if defined(_OPENMP)
	int i;
	int chunk_size = n / nthreads;
	int remainder = n % nthreads;
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = min2(nthreads, 2); /* Note: on x86, using more than 2 threads will end up making it slower */
	#pragma omp parallel for schedule(static, 1) firstprivate(src, dest, chunk_size, nthreads) num_threads(nthreads_non_const)
	for (i = 0; i < nthreads; i++){
		memcpy(dest + i * chunk_size, src + i * chunk_size, sizeof(real_t) * chunk_size);
	}
	/* tail elements not covered by the equal-size chunks */
	if (remainder > 0){
		memcpy(dest + nthreads * chunk_size, src + nthreads * chunk_size, sizeof(real_t) * remainder);
	}
	#else
	memcpy(dest, src, sizeof(real_t) * n);
	#endif
}

/* Zero out 'n' elements of arr, chunked across threads like copy_arr. */
static inline void set_to_zero(real_t arr[], const int n, const int nthreads)
{
	#if defined(_OPENMP)
	int i;
	int chunk_size = n / nthreads;
	int remainder = n % nthreads;
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = min2(nthreads, 2); /* Note: on x86 CPUs, using more than 2 threads will make it slower */
	#pragma omp parallel for schedule(static, 1) firstprivate(arr, chunk_size, nthreads) num_threads(nthreads_non_const)
	for (i = 0; i < nthreads; i++){
		memset(arr + i * chunk_size, 0, sizeof(real_t) * chunk_size);
	}
	if (remainder > 0){
		memset(arr + nthreads * chunk_size, 0, sizeof(real_t) * remainder);
	}
	#else
	memset(arr, 0, sizeof(real_t) * n);
	#endif
}

/* Element-wise in-place multiply: inout[i] *= other[i].
   Parallelized only for very large arrays (n > 1e7, > 4 threads). */
static inline void multiply_elemwise(real_t *restrict inout, const real_t *restrict other, const int n, const int nthreads)
{
	#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
	int i;
	int n_szt = n;
	#else
	size_t n_szt = (size_t) n;
	#endif
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = nthreads;
	#pragma omp parallel for if((n > 1e7) && (nthreads > 4)) schedule(static) firstprivate(inout, other, n_szt) num_threads(nthreads_non_const)
	for (size_t_for i = 0; i < n_szt; i++)
		inout[i] *= other[i];
}

/* Element-wise difference: out[i] = later[i] - earlier[i]. */
static inline void difference_elemwise(real_t *restrict out, const real_t *restrict later, const real_t *restrict earlier, const int n, const int nthreads)
{
	#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
	int i;
	int n_szt = n;
	#else
	size_t n_szt = (size_t) n;
	#endif
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = nthreads;
	#pragma omp parallel for if( (n > 1e7) && (nthreads > 4)) schedule(static) firstprivate(n_szt, out, later, earlier) num_threads(nthreads_non_const)
	for (size_t_for i = 0; i < n_szt; i++)
		out[i] = later[i] - earlier[i];
}

/* Return 1 if any element of arr is +/-inf or NaN, else 0. */
static inline int check_inf_nan(const real_t arr[], const int n, const int nthreads)
{
	size_t n_szt = (size_t) n;
	int is_wrong = 0;
	#if defined(_OPENMP) & !defined(_WIN32) &!defined(_WIN64) & (_OPENMP > 201305) /* OpenMP >= 4.0 */
	/* Note1: in most cases the array should not have invalid elements
	   Note2: 'omp cancel' is disabled by default through an environmental
	   variable, and it will ignore modifications of it within the same
	   calling program, so it very likely will not end up cancelling for
	   most use-cases. */
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = nthreads;
	if ( (n > 1e8) && (nthreads > 4) ){
		#pragma omp parallel for schedule(static) firstprivate(arr, n_szt) reduction(max: is_wrong) num_threads(nthreads_non_const)
		for (size_t i = 0; i < n_szt; i++){
			if (isinf(arr[i])){
				is_wrong = 1;
				// #pragma omp cancel for
			}
			if (isnan(arr[i])){
				is_wrong = 1;
				// #pragma omp cancel for
			}
		}
	} else
	#endif
	{
		/* serial path: bail out on the first invalid element */
		for (size_t i = 0; i < n_szt; i++){
			if (isinf(arr[i])){return 1;}
			if (isnan(arr[i])){return 1;}
		}
	}
	if (is_wrong){return 1;}
	return 0;
}

/* Accumulate: sum_arr[i] += new_values[i].
   Note: daxpy in MKL is actually slower than this */
static inline void add_to_sum(const real_t *restrict new_values, real_t *restrict sum_arr, const size_t n, const int nthreads)
{
	#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
	int i;
	int n_szt = n;
	#else
	size_t n_szt = (size_t) n;
	#endif
	/* oracle compilers cannot take 'const int'
	   (CRAN requirement for building in solaris OS) */
	int nthreads_non_const = nthreads;
	#pragma omp parallel for if((n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(sum_arr, new_values, n_szt) num_threads(nthreads_non_const)
	for (size_t_for i = 0; i < n_szt; i++)
		sum_arr[i] += new_values[i];
}

/* Turn a running sum into an average in place (no-op for n_summed <= 1). */
static inline void average_from_sum(real_t arr_sum[], const size_t n_summed, const int n)
{
	if (n_summed > 1){
		cblas_tscal(n, 1 / (real_t) n_summed, arr_sum, 1);
	}
}

/* --------------- End of general-purpose helpers --------------- */

/* Optimizers have a workspace that works pretty much like a C++ class.
This is a long piece of code dealing with memory management, you'll
   probably want to skip it. */

/* --------- Beginning of initializers, deallocators, and updaters -------- */

/* Allocate the L-BFGS correction-pair storage: mem_size pairs of
   (s, y) vectors of length n plus scratch buffers for the two-loop
   recursion. s_bak/y_bak are only allocated when curvature checking
   is enabled (min_curvature > 0).
   NOTE(review): 'out' itself and the individual mallocs are not
   checked here -- callers rely on the check_*_nonnull helpers, but a
   NULL 'out' would already crash on the first field assignment. */
bfgs_mem* initialize_bfgs_mem(const size_t mem_size, const int n, const real_t min_curvature, const real_t y_reg, const size_t upd_freq)
{
	real_t *s_bak;
	real_t *y_bak;
	if (min_curvature > 0){
		s_bak = (real_t*) malloc(sizeof(real_t) * n);
		y_bak = (real_t*) malloc(sizeof(real_t) * n);
	} else {
		s_bak = NULL;
		y_bak = NULL;
	}
	real_t *s_mem = (real_t*) malloc(sizeof(real_t) * n * mem_size);
	real_t *y_mem = (real_t*) malloc(sizeof(real_t) * n * mem_size);
	real_t *buffer_rho = (real_t*) malloc(sizeof(real_t) * mem_size);
	real_t *buffer_alpha = (real_t*) malloc(sizeof(real_t) * mem_size);
	bfgs_mem *out = (bfgs_mem*) malloc(sizeof(bfgs_mem));
	out->s_mem = s_mem;
	out->y_mem = y_mem;
	out->buffer_rho = buffer_rho;
	out->buffer_alpha = buffer_alpha;
	out->s_bak = s_bak;
	out->y_bak = y_bak;
	out->mem_size = mem_size;
	out->mem_used = 0;
	out->mem_st_ix = 0;
	out->upd_freq = upd_freq;
	out->y_reg = y_reg;
	out->min_curvature = min_curvature;
	return out;
}

/* Free all buffers owned by a bfgs_mem and the struct itself.
   free(NULL) is a no-op, so the optional s_bak/y_bak are safe. */
void dealloc_bfgs_mem(bfgs_mem *bfgs_memory)
{
	free(bfgs_memory->s_mem);
	free(bfgs_memory->y_mem);
	free(bfgs_memory->buffer_rho);
	free(bfgs_memory->buffer_alpha);
	free(bfgs_memory->s_bak);
	free(bfgs_memory->y_bak);
	free(bfgs_memory);
}

/* Allocate the empirical-Fisher gradient ring buffer (adaQN). */
fisher_mem* initialize_fisher_mem(const size_t mem_size, const int n)
{
	real_t *F = (real_t*) malloc(sizeof(real_t) * n * mem_size);
	real_t *buffer_y = (real_t*) malloc(sizeof(real_t) * mem_size);
	fisher_mem *out = (fisher_mem*) malloc(sizeof(fisher_mem));
	out->F = F;
	out->buffer_y = buffer_y;
	out->mem_size = mem_size;
	out->mem_used = 0;
	out->mem_st_ix = 0;
	return out;
}

void dealloc_fisher_mem(fisher_mem *fisher_memory)
{
	free(fisher_memory->F);
	free(fisher_memory->buffer_y);
	free(fisher_memory);
}

/* Return 1 (and print an error) if any required bfgs_mem buffer
   failed to allocate; s_bak/y_bak only count when curvature checking
   is on. */
static inline int check_bfgsmem_nonnull(bfgs_mem* bfgs_memory)
{
	if (
		(bfgs_memory->y_mem == NULL) ||
		(bfgs_memory->s_mem == NULL) ||
		(bfgs_memory->buffer_rho == NULL) ||
		(bfgs_memory->buffer_alpha == NULL) ||
		(bfgs_memory->s_bak == NULL && bfgs_memory->min_curvature > 0) ||
		(bfgs_memory->y_bak == NULL && bfgs_memory->min_curvature > 0)
	)
	{
		fprintf(stderr, "Error: Could not allocate memory for BFGS storage.\n");
		return 1;
	}
	return 0;
}

static inline int check_fishermem_nonnull(fisher_mem* fisher_memory)
{
	if (fisher_memory->F == NULL || fisher_memory->buffer_y == NULL){
		fprintf(stderr, "Error: Could not allocate memory for Fisher storage.\n");
		return 1;
	}
	return 0;
}

/* Validate an oLBFGS workspace after allocation.
   NOTE(review): the (oLBFGS == NULL) test comes after oLBFGS has
   already been dereferenced, so it can never help. */
static inline int check_oLBFGS_nonnull(workspace_oLBFGS *oLBFGS)
{
	/* Check for memory allocation failure */
	if (
		(oLBFGS->bfgs_memory == NULL) ||
		(oLBFGS->grad_prev == NULL) ||
		(oLBFGS == NULL)
	){
		fprintf(stderr, "Error: Could not allocate memory for oLBFGS.\n");
		return 1;
	}
	return check_bfgsmem_nonnull(oLBFGS->bfgs_memory);
}

/* Validate an SQN workspace after allocation.
   NOTE(review): this calls dealloc_SQN(SQN) on failure, but
   initialize_SQN also calls dealloc_SQN when this returns 1 --
   that is a double free. One of the two calls should be removed
   (check_oLBFGS_nonnull, by contrast, leaves freeing to the caller).
   The late (SQN == NULL) test is also ineffective, as above. */
static inline int check_SQN_nonnull(workspace_SQN *SQN)
{
	/* Check for memory allocation failure */
	if (
		(SQN->bfgs_memory == NULL) ||
		(SQN->x_sum == NULL) ||
		(SQN->x_avg_prev == NULL) ||
		(SQN->grad_prev == NULL && SQN->use_grad_diff) ||
		(SQN == NULL)
	){
		dealloc_SQN(SQN);
		fprintf(stderr, "Error: Could not allocate memory for SQN.\n");
		return 1;
	}
	return check_bfgsmem_nonnull(SQN->bfgs_memory);
}

/* Validate an adaQN workspace after allocation.
   NOTE(review): same double-free hazard as check_SQN_nonnull --
   dealloc_adaQN is invoked both here and by initialize_adaQN. */
static inline int check_adaQN_nonnull(workspace_adaQN *adaQN)
{
	/* Check for memory allocation failure */
	if (
		(adaQN->bfgs_memory == NULL) ||
		(adaQN->H0 == NULL) ||
		(adaQN->x_sum == NULL) ||
		(adaQN->x_avg_prev == NULL) ||
		(adaQN->grad_sum_sq == NULL) ||
		(adaQN->grad_prev == NULL && adaQN->use_grad_diff) ||
		(adaQN == NULL)
	){
		dealloc_adaQN(adaQN);
		fprintf(stderr, "Error: Could not allocate memory for adaQN.\n");
		return 1;
	}
	if ( check_bfgsmem_nonnull(adaQN->bfgs_memory) ) {return 1;};
	if (!adaQN->use_grad_diff){return check_fishermem_nonnull(adaQN->fisher_memory);}
	return 0;
}

void dealloc_oLBFGS(workspace_oLBFGS *oLBFGS)
{
	dealloc_bfgs_mem(oLBFGS->bfgs_memory);
	free(oLBFGS->grad_prev);
	free(oLBFGS);
}

void dealloc_SQN(workspace_SQN *SQN)
{
	dealloc_bfgs_mem(SQN->bfgs_memory);
	free(SQN->grad_prev);
	free(SQN->x_sum);
	free(SQN->x_avg_prev);
	free(SQN);
}

void dealloc_adaQN(workspace_adaQN *adaQN)
{
	dealloc_bfgs_mem(adaQN->bfgs_memory);
	/* fisher memory only exists when not using gradient differences */
	if (!adaQN->use_grad_diff || adaQN->fisher_memory != NULL){
		dealloc_fisher_mem(adaQN->fisher_memory);
	}
	free(adaQN->H0);
	free(adaQN->grad_prev);
	free(adaQN->x_sum);
	free(adaQN->x_avg_prev);
	free(adaQN->grad_sum_sq);
	free(adaQN);
}

/* Build an oLBFGS workspace: BFGS memory with upd_freq fixed at 1
   (a correction pair is produced on every iteration), plus storage
   for the previous gradient. Returns NULL on allocation failure. */
workspace_oLBFGS* initialize_oLBFGS(const int n, const size_t mem_size, const real_t hess_init, const real_t y_reg, const real_t min_curvature, const int check_nan, const int nthreads)
{
	bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, 1);
	real_t *grad_prev = (real_t*) malloc(sizeof(real_t) * n);
	workspace_oLBFGS *out = (workspace_oLBFGS*) malloc(sizeof(workspace_oLBFGS));
	out->bfgs_memory = bfgs_memory;
	out->grad_prev = grad_prev;
	out->hess_init = hess_init;
	out->niter = 0;
	out->section = 0;
	out->check_nan = check_nan;
	out->nthreads = nthreads;
	out->n = n;
	if ( check_oLBFGS_nonnull(out) ) {dealloc_oLBFGS(out); return NULL;}
	return out;
}

/* Build an SQN workspace. grad_prev is only needed when correction
   pairs come from gradient differences (use_grad_diff). x_sum starts
   zeroed (calloc) since it accumulates iterates. Returns NULL on
   allocation failure (see double-free note on check_SQN_nonnull). */
workspace_SQN* initialize_SQN(const int n, const size_t mem_size, const size_t bfgs_upd_freq, const real_t min_curvature, const int use_grad_diff, const real_t y_reg, const int check_nan, const int nthreads)
{
	real_t *grad_prev;
	if (use_grad_diff){grad_prev = (real_t*) malloc(sizeof(real_t) * n);}
	else {grad_prev = NULL;}
	bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, bfgs_upd_freq);
	real_t *x_sum = (real_t*) calloc(n, sizeof(real_t));
	real_t *x_avg_prev = (real_t*) malloc(sizeof(real_t) * n);
	workspace_SQN* out = (workspace_SQN*) malloc(sizeof(workspace_SQN));
	out->bfgs_memory = bfgs_memory;
	out->grad_prev = grad_prev;
	out->x_sum = x_sum;
	out->x_avg_prev = x_avg_prev;
	out->use_grad_diff = use_grad_diff;
	out->niter = 0;
	out->section = 0;
	out->check_nan = check_nan;
	out->nthreads = nthreads;
	out->n = n;
	if ( check_SQN_nonnull(out) ) {dealloc_SQN(out); return NULL;}
	return out;
}

/* Build an adaQN workspace. Uses either a Fisher-memory buffer
   (default) or gradient differences (use_grad_diff) for curvature
   information; grad_sum_sq and x_sum start zeroed as accumulators.
   Returns NULL on allocation failure (see double-free note on
   check_adaQN_nonnull). */
workspace_adaQN* initialize_adaQN(const int n, const size_t mem_size, const size_t fisher_size, const size_t bfgs_upd_freq, const real_t max_incr, const real_t min_curvature, const real_t scal_reg, const real_t rmsprop_weight, const int use_grad_diff, const real_t y_reg, const int check_nan, const int nthreads)
{
	bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, bfgs_upd_freq);
	fisher_mem *fisher_memory;
	real_t *grad_prev;
	if (use_grad_diff){
		fisher_memory = NULL;
		grad_prev = (real_t*) malloc(sizeof(real_t) * n);
	} else {
		fisher_memory = initialize_fisher_mem(fisher_size, n);
		grad_prev = NULL;
	}
	real_t *H0 = (real_t*) malloc(sizeof(real_t) * n);
	real_t *x_sum = (real_t*) calloc(n, sizeof(real_t));
	real_t *x_avg_prev = (real_t*) malloc(sizeof(real_t) * n);
	real_t *grad_sum_sq = (real_t*) calloc(n, sizeof(real_t));
	workspace_adaQN *out = (workspace_adaQN*) malloc(sizeof(workspace_adaQN));
	out->bfgs_memory = bfgs_memory;
	out->fisher_memory = fisher_memory;
	out->H0 = H0;
	out->grad_prev = grad_prev;
	out->x_sum = x_sum;
	out->x_avg_prev = x_avg_prev;
	out->grad_sum_sq = grad_sum_sq;
	out->max_incr = max_incr;
	out->scal_reg = scal_reg;
	out->rmsprop_weight = rmsprop_weight;
	out->use_grad_diff = use_grad_diff;
	out->f_prev = 0;
	out->niter = 0;
	out->section = 0;
	out->check_nan = check_nan;
	out->nthreads = nthreads;
	out->n = n;
	if ( check_adaQN_nonnull(out) ){dealloc_adaQN(out); return NULL;}
	return out;
}

/* Functions for adding and discarding correction pairs and previous gradients.
   When deleted, the data is not overwritten or freed, but the indexes
   are reset to act as if they were not present.
*/ static inline void flush_bfgs_mem(bfgs_mem *bfgs_memory) { bfgs_memory->mem_used = 0; bfgs_memory->mem_st_ix = 0; } static inline void flush_fisher_mem(fisher_mem *fisher_memory) { if (fisher_memory != NULL) { fisher_memory->mem_used = 0; fisher_memory->mem_st_ix = 0; } } static inline void incr_bfgs_counters(bfgs_mem *bfgs_memory) { bfgs_memory->mem_st_ix = (bfgs_memory->mem_st_ix + 1) % bfgs_memory->mem_size; bfgs_memory->mem_used = ((bfgs_memory->mem_used + 1) >= bfgs_memory->mem_size)? bfgs_memory->mem_size : (bfgs_memory->mem_used + 1); } static inline void incr_fisher_counters(fisher_mem *fisher_memory) { fisher_memory->mem_st_ix = (fisher_memory->mem_st_ix + 1) % fisher_memory->mem_size; fisher_memory->mem_used = ((fisher_memory->mem_used + 1) >= fisher_memory->mem_size)? fisher_memory->mem_size : (fisher_memory->mem_used + 1); } static inline void add_to_fisher_mem(real_t grad[], fisher_mem *fisher_memory, const int n, const int nthreads) { if (fisher_memory != NULL){ copy_arr(grad, fisher_memory->F + fisher_memory->mem_st_ix * n, n, nthreads); incr_fisher_counters(fisher_memory); } } static inline void backup_corr_pair(bfgs_mem *bfgs_memory, const int n, const int nthreads) { if (bfgs_memory->min_curvature > 0){ copy_arr(bfgs_memory->s_bak, bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n, n, nthreads); copy_arr(bfgs_memory->y_bak, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); } } static inline void rollback_corr_pair(bfgs_mem *bfgs_memory, const int n, info_enum *iter_info, const int nthreads) { if (bfgs_memory->min_curvature > 0){ copy_arr(bfgs_memory->s_bak, bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n, n, nthreads); copy_arr(bfgs_memory->y_bak, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); *iter_info = curvature_too_small; } } static inline void archive_x_avg(real_t x_avg[], real_t x_avg_prev[], const int n, const int nthreads) { copy_arr(x_avg, x_avg_prev, n, nthreads); set_to_zero(x_sum, n, nthreads); /* x_avg 
is aliased to x_sum */ } /* --------- End of initializers, deallocators, and updaters -------- */ /* ============= Optimization algorithms section ============= Note: the functions here oftentimes have an input variable 'nthreads', but most of the work is done through BLAS functions, and the number of threads for them is set beforehand in the optimizer functions. */ /* Approximate H^(-1) * g through the "L-BFGS two-loop recursion" For the variable names, refer to: Wright, S. and Nocedal, J., 1999. "Numerical optimization." (ch. 7) grad (in, out) : real_t[n] Gradient for the current values of the variables - the computed search direction will be written to this same array, overwriting the gradient. n : int Number of variables (dimensionality of 'x') H0 : real_t[n] or NULL Initial matrix H0 (diagonal only) from which H^-1 is updated. If passing NULL here and zero to 'h0', will use a scalar value as suggested in the book "Numerical optimization." (Wright & Nocedal) h0 : real_t number to which to initialize the diagonal H0. If passing zero here and NULL to 'H0', will use a scalar value as suggested in the book "Numerical optimization." (Wright & Nocedal) y_mem : real_t[mem_size, n] 'y' correction variables. These shall be ordered from earliest to latest, with the earliest vector not necessarily at the first position. s_mem : real_t[mem_size, n] 's' correction variables. These shall be ordered from earliest to latest, with the earliest vector not necessarily at the first position. mem_size : size_t Dimensionality of the arrays 'y_mem' and 's_mem' (how many rows it can have). mem_used : size_t Number of filled rows in 'y_mem' and 's_mem' mem_st_ix : size_t Position in 'y_mem' and 's_mem' at which the earliest vector is stored, with later elements following onwards, continuing at the beginning after position 'mem_used' if this is not zero. buffer_rho : real_t[mem_size] Temporary array in which to store the computed rho values. 
buffer_alpha : real_t[mem_size] Temporary array in which to store the computed alpha values. nthreads : int Number of parallel threads to use - most of the work is done on a BLAS library (and the threads for it are set elsewhere), but for very large problems, passes over the grad/out array can also be parallelized. */ static inline void approx_inv_hess_grad(real_t grad[], int n, real_t H0[], real_t h0, real_t y_mem[], real_t s_mem[], size_t mem_size, size_t mem_used, size_t mem_st_ix, real_t buffer_rho[], real_t buffer_alpha[], int nthreads) { real_t scaling, beta; size_t i, ipos, last_pos; /* backward pass: alpha <- rho * s' q; q <- q - alpha * y */ for (size_t ii = 0; ii < mem_used; ii++) { i = mem_used - ii - 1; ipos = (mem_st_ix + i) % mem_size; buffer_rho[i] = 1 / cblas_tdot(n, y_mem + ipos*n, 1, s_mem + ipos*n, 1); buffer_alpha[i] = buffer_rho[i] * cblas_tdot(n, grad, 1, s_mem + ipos*n, 1); cblas_taxpy(n, -buffer_alpha[i], y_mem + ipos*n, 1, grad, 1); } /* Use a diagonal matrix as a starting point: By default, will calculate it from the last correction pair */ if ( (H0 == NULL) && (h0 <= 0) ) { last_pos = (mem_st_ix - 1 + mem_used) % mem_size; scaling = cblas_tdot(n, s_mem + last_pos*n, 1, y_mem + last_pos*n, 1) / cblas_tdot(n, y_mem + last_pos*n, 1, y_mem + last_pos*n, 1); cblas_tscal(n, scaling, grad, 1); } /* But can also initialize it from values supplied by the user */ else { /* Use diagonal passed by user */ if (H0 != NULL) { multiply_elemwise(grad, H0, n, nthreads); } /* Use scalar passed by user */ else { cblas_tscal(n, h0, grad, 1); } } /* forward pass: beta <- rho * y' * r; r <- r * s * (alpha - beta) */ for (size_t i = 0; i < mem_used; i++) { ipos = (mem_st_ix + i) % mem_size; beta = buffer_rho[i] * cblas_tdot(n, y_mem + ipos*n, 1, grad, 1); cblas_taxpy(n, buffer_alpha[i] - beta, s_mem + ipos*n, 1, grad, 1); } } /* Update the data on previous squared gradients Can use either AdaGrad (simple sum) or RMSProp (squared sum) grad : new gradient to add 
   grad_sum_sq (in, out) : array where to store sum of squared past gradients
   rmsprop_weight : weight in interval(0,1) to give to old info (if 0, will use AdaGrad)
   n : number of variables (dimensionality of 'x')
   nthreads : number of parallel threads to use */
static inline void update_sum_sq(real_t *restrict grad, real_t *restrict grad_sum_sq, real_t rmsprop_weight, int n, int nthreads)
{
    /* older OpenMP (< 3.0) and Windows builds require a signed loop index;
       'size_t_for' is presumably #define'd to match elsewhere - TODO confirm */
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
    int n_szt = n;
    int i;
    #else
    size_t n_szt = (size_t) n;
    #endif
    real_t weight_new;

    /* oracle compilers cannot take 'const int'
       (CRAN requirement for building in solaris OS) */
    int nthreads_non_const = nthreads;

    /* RMSProp update: exponentially-weighted average of squared gradients */
    if (rmsprop_weight > 0 && rmsprop_weight < 1)
    {
        weight_new = 1 - rmsprop_weight;
        #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(n_szt, grad, grad_sum_sq, rmsprop_weight, weight_new) num_threads(nthreads_non_const)
        for (size_t_for i = 0; i < n_szt; i++)
            grad_sum_sq[i] = rmsprop_weight*grad_sum_sq[i] + weight_new*(grad[i] * grad[i]);
    }

    /* AdaGrad update: plain running sum of squared gradients */
    else
    {
        #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(n_szt, grad, grad_sum_sq) num_threads(nthreads_non_const)
        for (size_t_for i = 0; i < n_szt; i++)
            grad_sum_sq[i] += grad[i] * grad[i];
    }
}

/* Compute a search direction (used as H0 initializer by adaQN) as rescaled
   gradient using a diagonal matrix, given by the sums of squares of past
   gradients (AdaGrad or RMSProp formulae).
   direction (out) : array where to save the computed direction.
       (if NULL, will save the direction in the same 'grad' array)
   grad (in, out) : current gradient
   grad_sum_sq (in, out) : sum of squares of past gradients (weighted sum for RMSProp)
   n : number of variables (dimensionality of 'x')
   scal_reg : regularization (epsilon) for the scaling
   rmsprop_weight : weight for old gradients if using RMSProp (pass 0 for AdaGrad init)
   num_threads : number of parallel threads to use */
static inline void diag_rescal(real_t *restrict direction, real_t *restrict grad, real_t *restrict grad_sum_sq, int n, real_t scal_reg, real_t rmsprop_weight, int nthreads)
{
    /* refresh the accumulated squared gradients before rescaling */
    update_sum_sq(grad, grad_sum_sq, rmsprop_weight, n, nthreads);
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
    int i;
    int n_szt = n;
    #else
    size_t n_szt = (size_t) n;
    #endif

    /* oracle compilers cannot take 'const int'
       (CRAN requirement for building in solaris OS) */
    int nthreads_non_const = nthreads;

    /* in-place: rescale the gradient itself */
    if (direction == NULL)
    {
        #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const >= 4) ) schedule(static) firstprivate(direction, grad_sum_sq, scal_reg, n_szt) num_threads(nthreads_non_const)
        for (size_t_for i = 0; i < n_szt; i++)
            grad[i] /= sqrt(grad_sum_sq[i] + scal_reg);
    }

    /* out-of-place: write the rescaled copy to 'direction', leave 'grad' as is */
    else
    {
        #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const >= 4) ) schedule(static) firstprivate(direction, grad_sum_sq, scal_reg, n_szt) num_threads(nthreads_non_const)
        for (size_t_for i = 0; i < n_szt; i++)
            direction[i] = grad[i] / sqrt(grad_sum_sq[i] + scal_reg);
    }
}

/* Take a step in the search direction specified by the respective algorithm
   step_size : size of the step to take
   n : number of variables (dimensionality of 'x')
   x (in, out) : current values of the variables
   grad (in, out) : gradient at current values of x - the search direction
       will be written there, overwriting the gradient
   bfgs_memory : BFGS memory struct
   rmsprop_weight (adaQN) : weight for old gradients if using RMSProp
       (pass 0 for SQN, oLBFGS, and adaQN with AdaGrad init)
   H0 (adaQN) : temporary array
where to store diagonal initializer for inv. Hessian grad_sum_sq (adaQN)(in,out) : sums of squares of past gradients (weighted sums in RMSProp) scal_reg (adaQN) : regularization for the diagonal rescaling using grad_sum_sq check_nan : whether to check the search direction for NaN or Inf (will reject it if so) iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void take_step(real_t step_size, int n, real_t x[], real_t grad[], bfgs_mem *bfgs_memory, real_t rmsprop_weight, real_t H0[], real_t h0, real_t grad_sum_sq[], real_t scal_reg, int check_nan, info_enum *iter_info, int nthreads) { /* When there are no correction pairs, take a gradient or rescaled gradient step */ if (bfgs_memory->mem_used == 0) { /* If no rescaling, take a simple gradient step, otherwise, take AdaGrad or RMSProp step */ if (grad_sum_sq != NULL) {diag_rescal(NULL, grad, grad_sum_sq, n, scal_reg, rmsprop_weight, nthreads);} } /* When there are correction pairs, get an approx. invHess-grad direction (with diagonal init) */ else { if (grad_sum_sq != NULL) { diag_rescal(H0, grad, grad_sum_sq, n, scal_reg, rmsprop_weight, nthreads); } approx_inv_hess_grad(grad, n, H0, h0, bfgs_memory->y_mem, bfgs_memory->s_mem, bfgs_memory->mem_size, bfgs_memory->mem_used, (bfgs_memory->mem_st_ix == bfgs_memory->mem_used)? 
0 : bfgs_memory->mem_st_ix, bfgs_memory->buffer_rho, bfgs_memory->buffer_alpha, nthreads); } /* Check if the search direction is invalid */ if (check_nan) { if ( check_inf_nan(grad, n, nthreads) || /* There are also cases in which the search direction is not NaN, but is too large nevertheless */ cblas_tnrm2(n, grad, 1) > 1e3 * n ) { flush_bfgs_mem(bfgs_memory); *iter_info = search_direction_was_nan; return; } } /* Finally, take step in computed direction */ cblas_taxpy(n, -step_size, grad, 1, x, 1); } /* Update 's' correction vector If there's a curvature threshold, will also create a backup of the correction pair currently sitting in the memory slot into which the new pair will be written. Note that this procedure will not copy the new average into the previous average array, which needs to be done after updating 'y' in the main optimization function. x_sum : sum of 'x' (optimization variables) since the last BFGS update (will be overwritten during this procedure) (pass 'x' for oLBFGS) x_avg_prev : average values of 'x' during the interval of the previous BFGS update (pass 'x_prev' for oLBFGS) n : number of variables (dimensionality of 'x') needs_div : whether x_sum should be divided to obtain the average (pass 0 if it's already an average) bfgs_memory (in, out) : BFGS memory struct nthreads : number of parallel threads to use */ static inline void update_s_vector(real_t x_sum[], real_t x_avg_prev[], int n, int needs_div, bfgs_mem *bfgs_memory, int nthreads) { /* oLBFGS: s = x - x_prev ----not computed here others: s = x_avg - x_avg_prev */ backup_corr_pair(bfgs_memory, n, nthreads); if (needs_div) { average_from_sum(x_sum, bfgs_memory->upd_freq, n); } /* x_sum has now become x_avg --- this is aliased by the preprocessor, so don't worry about it not being declared */ difference_elemwise(bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n, x_avg, x_avg_prev, n, nthreads); } /* Check curvature See if the new correction pair meets a minimum curvature threshold. 
If it does, accept it (store it), and if not, restore back the old correction pair, which was backed-up during the 'update_s_vector' procedure. bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void check_min_curvature(bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* s^T * y / s^T * s > epsilon */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n;; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; real_t curv; if (bfgs_memory->min_curvature > 0) { curv = cblas_tdot(n, s, 1, y, 1) / cblas_tdot(n, s, 1, s, 1); if (curv <= bfgs_memory->min_curvature) { rollback_corr_pair(bfgs_memory, n, iter_info, nthreads); return; } } incr_bfgs_counters(bfgs_memory); } /* Update 'y' correction vector using gradient differences Note: 'x_sum' needs to be reset after this (SQN and adaQN) grad : gradient (at new 'x' on the same batch for oLBFGS, at 'x_avg' on a larger batch for others) grad_prev : previous gradient (at previous 'x' for oLBFGS, at 'x_avg_prev' on the previous large batch for others) bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') y_reg : regularization parameter (will add this times 's' to 'y') (pass 0 for SQN and adaQN) iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void update_y_grad_diff(real_t grad[], real_t grad_prev[], bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* oLBFGS: y = grad_batch(x) - grad_batch(x_prev) + lambda * s others: y = grad(x_avg) - grad_prev(x_avg_prev) */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n;; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; difference_elemwise(y, grad, grad_prev, n, nthreads); if (bfgs_memory->y_reg > 0){ cblas_taxpy(n, bfgs_memory->y_reg, s, 1, y, 
1); } check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* Update 'y' correction vector using empirical Fisher matrix (adaQN) fisher_memory : empirical Fisher struct bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void update_y_fisher(fisher_mem *fisher_memory, bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* y = F' (F * s) / |F| */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; CBLAS_ORDER c_ord = CblasRowMajor; CBLAS_TRANSPOSE trans_no = CblasNoTrans; CBLAS_TRANSPOSE trans_yes = CblasTrans; cblas_tgemv(c_ord, trans_no, fisher_memory->mem_used, n, 1, fisher_memory->F, n, s, 1, 0, fisher_memory->buffer_y, 1); cblas_tgemv(c_ord, trans_yes, fisher_memory->mem_used, n, 1 / (real_t) fisher_memory->mem_used, fisher_memory->F, n, fisher_memory->buffer_y, 1, 0, y, 1); check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* Update 'y' correction vector using the production between the Hessian and the 's' vector hess_vec : calculated Hessian * s bfgs_memory (in, out) : BFGS memory struct iter_info : pointer to the indicator on encountered problems n : number of variables (dimensionality of 'x') nthreads : number of parallel threads to use */ static inline void update_y_hessvec(real_t hess_vec[], bfgs_mem *bfgs_memory, info_enum *iter_info, int n, int nthreads) { copy_arr(hess_vec, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* ============= Optimizer functions for the external API ============= Documentation for them can be found in the header file. 
   These functions are very hard to follow, but think of them like this:
   each of them will send you to a different part as if it were a 'goto',
   only there will be an interruption in between where the required
   calculation is requested externally. Check which part sent you to where
   you currently are, and where is each part going to send you next. */

/* One interaction of the oLBFGS driver. The caller performs the calculation
   named in '*task' on the values pointed to by '*req', then calls again.
   Returns 1 when 'x' was updated in this call, 0 when it was not,
   -1000 on an invalid workspace. */
int run_oLBFGS(real_t step_size, real_t x[], real_t grad[], real_t **req,
               task_enum *task, workspace_oLBFGS *oLBFGS, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;

    /* first run: immediately request a gradient */
    if (oLBFGS->section == 0)
    {
        *task = calc_grad; *req = x; oLBFGS->section = 1;
        return 0;
    }

    /* second run (main loop): save grad, take a step, save delta_x,
       request another gradient in same batch */
    if (oLBFGS->section == 1)
    {
        /* save gradient */
        copy_arr(grad, oLBFGS->grad_prev, oLBFGS->n, oLBFGS->nthreads);

        /* take a step */
        take_step(step_size, oLBFGS->n, x, grad, oLBFGS->bfgs_memory,
                  0, NULL, oLBFGS->hess_init, NULL, 0,
                  oLBFGS->check_nan, iter_info, oLBFGS->nthreads);
        oLBFGS->niter++;

        /* store differences in BFGS memory */
        if (*iter_info == no_problems_encountered){
            backup_corr_pair(oLBFGS->bfgs_memory, oLBFGS->n, oLBFGS->nthreads);
            /* rollback happens on 'update_y_grad_diff' */
            /* s = -step_size * direction  (direction currently held in 'grad') */
            cblas_tscal(oLBFGS->n, -step_size, grad, 1);
            copy_arr(grad, oLBFGS->bfgs_memory->s_mem + oLBFGS->bfgs_memory->mem_st_ix * oLBFGS->n, oLBFGS->n, oLBFGS->nthreads);

            /* request another gradient */
            *task = calc_grad_same_batch; *req = x; oLBFGS->section = 2;
            return 1;
        }
        else {
            if (*iter_info == search_direction_was_nan) { flush_bfgs_mem(oLBFGS->bfgs_memory); }
            *task = calc_grad; *req = x; oLBFGS->section = 1;
            return 0;
        }
    }

    /* third run (loop): update correction pairs, request a gradient on new batch */
    if (oLBFGS->section == 2)
    {
        update_y_grad_diff(grad, oLBFGS->grad_prev, oLBFGS->bfgs_memory, oLBFGS->n, iter_info, oLBFGS->nthreads);
        *task = calc_grad; *req = x; oLBFGS->section = 1;
        return 0;
    }

    /* unreachable for a correctly-initialized workspace */
    *task = invalid_input;
    fprintf(stderr, "oLBFGS got an invalid workspace as input.\n");
    return -1000;
}

/* One interaction of the SQN driver; same request/response protocol as
   run_oLBFGS, with the extra 'hess_vec'/'req_vec' channel for
   Hessian-vector products. */
int run_SQN(real_t step_size, real_t x[], real_t grad[], real_t hess_vec[],
            real_t **req, real_t **req_vec, task_enum *task,
            workspace_SQN *SQN, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;
    int return_value = 0;

    /* first run: immediately request a gradient */
    if (SQN->section == 0)
    {
        // add_to_sum(x, SQN->x_sum, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* second run (main loop): take a step, save sum,
       see if it's time for creating correction pair */
    if (SQN->section == 1)
    {
        /* take a step */
        take_step(step_size, SQN->n, x, grad, SQN->bfgs_memory,
                  0, NULL, 0, NULL, 0,
                  SQN->check_nan, iter_info, SQN->nthreads);
        SQN->niter++;

        /* check for unchanged parameters */
        if (*iter_info == search_direction_was_nan) {return_value = 0;}
        else {return_value = 1;}

        /* save sum of new values
           note: even if they are not updated, need to maintain the sum in the
           same magnitude, as it will be divided by L */
        add_to_sum(x, SQN->x_sum, SQN->n, SQN->nthreads);

        /* usually, requests a new gradient and returns right here */
        if ( (SQN->niter % SQN->bfgs_memory->upd_freq) != 0 ) { goto resume_main_loop; }

        /* at some intervals, update hessian approx */

        /* exception: the first time, just store the averages - if using grad
           diff, request a long gradient on those, else go back */
        if (SQN->niter == SQN->bfgs_memory->upd_freq)
        {
            average_from_sum(SQN->x_sum, SQN->bfgs_memory->upd_freq, SQN->n);
            archive_x_avg(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
            /* note: x_avg is aliased by the preprocessor as synonym to x_sum */
            if (SQN->use_grad_diff)
            {
                *task = calc_grad_big_batch; *req = SQN->x_avg_prev; SQN->section = 2;
                return return_value;
            }
            else { goto resume_main_loop; }
        }

        /* first update 's' (turns the sum to avg), but don't reset the sum yet
           as it'll be needed for a hessian-vec or long grad */
        update_s_vector(SQN->x_sum, SQN->x_avg_prev, SQN->n, 1, SQN->bfgs_memory, SQN->nthreads);

        /* request long grad on the new average */
        if (SQN->use_grad_diff)
        {
            *task = calc_grad_big_batch; SQN->section = 3; *req = SQN->x_avg;
        }
        /* request hessian-vector on the differences between the averages */
        else
        {
            *task = calc_hess_vec; SQN->section = 4; *req = SQN->x_avg;
            *req_vec = SQN->bfgs_memory->s_mem + SQN->n * SQN->bfgs_memory->mem_st_ix;
        }
        return return_value;
    }

    /* third run: got a long gradient on first averages, store it and go back */
    if (SQN->section == 2)
    {
        copy_arr(grad, SQN->grad_prev, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* fourth run (loop): got a long gradient on new averages, reset sum,
       create correction pair and go back */
    if (SQN->section == 3)
    {
        update_y_grad_diff(grad, SQN->grad_prev, SQN->bfgs_memory, SQN->n, iter_info, SQN->nthreads);
        if (*iter_info == no_problems_encountered){
            copy_arr(grad, SQN->grad_prev, SQN->n, SQN->nthreads);
            copy_arr(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
        }
        set_to_zero(SQN->x_sum, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* fifth run (loop): got a hessian-vector product, reset sum,
       create a correction pair and go back */
    if (SQN->section == 4)
    {
        archive_x_avg(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
        update_y_hessvec(hess_vec, SQN->bfgs_memory, iter_info, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* unreachable for a correctly-initialized workspace */
    *task = invalid_input;
    fprintf(stderr, "SQN got an invalid workspace as input.\n");
    return -1000;

    /* common exit: hand control back to the caller asking for a regular gradient */
    resume_main_loop:
        SQN->section = 1;
        *task = calc_grad;
        *req = x;
        return return_value;
}

/* One interaction of the adaQN driver; same request/response protocol,
   with optional function-value checks ('max_incr') and either empirical
   Fisher or gradient-difference 'y' updates. */
int run_adaQN(real_t step_size, real_t x[], real_t f, real_t grad[], real_t **req,
              task_enum *task, workspace_adaQN *adaQN, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;
    int return_value = 0;

    /* first run: immediately request a gradient */
    if (adaQN->section == 0)
    {
        // add_to_sum(x, adaQN->x_sum, adaQN->n, adaQN->nthreads);
        goto resume_main_loop;
    }

    /* second run (main loop): store gradient, take a step (gradient_sq is
       summed there), sum x, see if it's time for creating correction pair
       --if so, request either long grad or function */
    if (adaQN->section == 1)
    {
        /* store gradient */
        add_to_fisher_mem(grad, adaQN->fisher_memory, adaQN->n, adaQN->nthreads);

        /* take a step */
        take_step(step_size, adaQN->n, x, grad, adaQN->bfgs_memory,
                  adaQN->rmsprop_weight, adaQN->H0, 0, adaQN->grad_sum_sq, adaQN->scal_reg,
                  adaQN->check_nan, iter_info, adaQN->nthreads);
        if (*iter_info == search_direction_was_nan)
        {
            // flush_fisher_mem(adaQN->fisher_memory);
            return_value = 0;
        }
        else { return_value = 1; }
        adaQN->niter++;

        /* save sum of new values
           note: even if they are not updated, need to maintain the sum in the
           same magnitude, as it will be divided by L */
        add_to_sum(x, adaQN->x_sum, adaQN->n, adaQN->nthreads);

        /* usually, requests a new gradient and returns right here */
        if ( (adaQN->niter % adaQN->bfgs_memory->upd_freq) != 0 ) { goto resume_main_loop; }

        /* at some intervals, update hessian approx */

        /* exception: the first time, just store the averages, then:
           -if use_grad_diff, request a long gradient on the averages (function comes later)
           -if using max_incr, request a function on the averages
           -if neither, go back to main loop */
        if (adaQN->niter == adaQN->bfgs_memory->upd_freq)
        {
            average_from_sum(adaQN->x_sum, adaQN->bfgs_memory->upd_freq, adaQN->n);
            archive_x_avg(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, adaQN->nthreads);
            /* note: x_avg is aliased by the preprocessor as synonym to x_sum */
            if (adaQN->use_grad_diff){
                *task = calc_grad_big_batch;
                *req = adaQN->x_avg_prev;
                adaQN->section = 2;
                return return_value;
            }
            if (adaQN->max_incr > 0){
                *task = calc_fun_val_batch;
                *req = adaQN->x_avg_prev;
                adaQN->section = 3;
                return return_value;
            }
            goto resume_main_loop;
        }

        /* evaluate function on new averages if needed */
        if (adaQN->max_incr > 0)
        {
            average_from_sum(adaQN->x_sum, adaQN->bfgs_memory->upd_freq, adaQN->n);
            *task = calc_fun_val_batch;
            *req = adaQN->x_avg;
            adaQN->section = 5;
            return return_value;
        }

        /* first update 's' (turns the sum to avg), but don't reset the sum
           yet as it'll be needed for a hessian-vec or long grad */
        update_s_vector(adaQN->x_sum, adaQN->x_avg_prev, adaQN->n, 1, adaQN->bfgs_memory, adaQN->nthreads);
        goto update_y;
    }

    /* third run: got a long gradient on first averages, store it and go back */
    if (adaQN->section == 2)
    {
        copy_arr(grad, adaQN->grad_prev, adaQN->n, adaQN->nthreads);
        /* ask for function if needed */
        if (adaQN->max_incr){
            *task = calc_fun_val_batch;
            *req = adaQN->x_avg_prev;
            adaQN->section = 3;
            return 0;
        }
        else { goto resume_main_loop; }
    }

    /* fourth run: got first function eval on validation batch,
       store it and request a gradient */
    if (adaQN->section == 3)
    {
        adaQN->f_prev = f;
        goto resume_main_loop;
    }

    /* fifth run (loop): got a long gradient on new averages,
       create correction pair (function was asked before) */
    if (adaQN->section == 4){
        update_y_grad_diff(grad, adaQN->grad_prev, adaQN->bfgs_memory, adaQN->n, iter_info, adaQN->nthreads);
        if (*iter_info == no_problems_encountered)
        {
            copy_arr(grad, adaQN->grad_prev, adaQN->n, adaQN->nthreads);
        }
        set_to_zero(adaQN->x_sum, adaQN->n, adaQN->nthreads);
        goto resume_main_loop;
    }

    /* sixth run (loop): evaluated function on new averages,
       now see whether to keep correction pair */
    if (adaQN->section == 5)
    {
        /* function increased too much (or is not finite): discard all memory
           and restore 'x' to the previous average */
        if (f > adaQN->max_incr * adaQN->f_prev || isinf(f) || isnan(f) )
        {
            flush_bfgs_mem(adaQN->bfgs_memory);
            flush_fisher_mem(adaQN->fisher_memory);
            copy_arr(adaQN->x_avg_prev, x, adaQN->n, adaQN->nthreads);
            *iter_info = func_increased;
            return_value = 1;
            goto resume_main_loop;
        }
        else
        {
            adaQN->f_prev = f;
            /* x_sum already holds the average here, hence needs_div = 0 */
            update_s_vector(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, 0, adaQN->bfgs_memory, adaQN->nthreads);
            goto update_y;
        }
    }

    /* unreachable for a correctly-initialized workspace */
    *task = invalid_input;
    fprintf(stderr, "adaQN got an invalid workspace as input.\n");
    return -1000;

    /* shared tail: produce the 'y' vector either from a long gradient
       (requested externally) or from the empirical Fisher matrix */
    update_y:
        if (adaQN->use_grad_diff)
        {
            *req = adaQN->x_avg;
            *task = calc_grad_big_batch;
            adaQN->section = 4;
            return return_value;
        }
        else
        {
            update_y_fisher(adaQN->fisher_memory, adaQN->bfgs_memory, adaQN->n, iter_info, adaQN->nthreads);
            if (*iter_info == no_problems_encountered)
            {
                copy_arr(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, adaQN->nthreads);
            }
            set_to_zero(adaQN->x_sum, adaQN->n, adaQN->nthreads);
            goto resume_main_loop;
        }

    /* common exit: hand control back to the caller asking for a regular gradient */
    resume_main_loop:
        adaQN->section = 1;
        *task = calc_grad;
        *req = x;
        return return_value;
}

#ifdef __cplusplus
}
#endif
04_loop_decompose_broken.c
//IMPORTANT NOTE! THIS CODE DOESN'T WORK PROPERLY #include <stdio.h> #include <omp.h> #define MAX_ITS 10000 int main() { int nproc, i, sum, thread_id; nproc = omp_get_max_threads(); int its_per_proc[nproc]; for (i = 0; i< nproc; ++i){ its_per_proc[i] = 0; } #pragma omp parallel for for (i = 0; i< MAX_ITS; ++i){ /*This line isn't safe because by default there is only one thread_id variable and all of the threads are competing to write to it*/ thread_id = omp_get_thread_num(); //Which element of its_per_proc is written to here is pretty much random its_per_proc[thread_id]++; } sum = 0; for (i = 0; i< nproc; ++i){ printf("Processor %i performed %i iterations\n", i, its_per_proc[i]); sum += its_per_proc[i]; } printf("Total work on all processors is %i\n", sum); }
gramschmidt.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 512. */ #include "gramschmidt.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj), DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) i*j) / ni; Q[i][j] = ((DATA_TYPE) i*(j+1)) / nj; } for (i = 0; i < nj; i++) for (j = 0; j < nj; j++) R[i][j] = ((DATA_TYPE) i*(j+2)) / nj; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj), DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, A[i][j]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); for (i = 0; i < nj; i++) for (j = 0; j < nj; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, R[i][j]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, Q[i][j]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_gramschmidt(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj), DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj)) { int i, j, k; DATA_TYPE nrm; #pragma scop #pragma omp parallel for private (i, j) for (k = 0; k < _PB_NJ; k++) { nrm = 0; for (i = 0; i < _PB_NI; i++) nrm += A[i][k] * A[i][k]; R[k][k] = sqrt(nrm); for (i = 0; i < _PB_NI; i++) Q[i][k] = A[i][k] / R[k][k]; for (j = k + 1; j < _PB_NJ; j++) { R[k][j] = 0; for (i = 0; i < _PB_NI; i++) R[k][j] += Q[i][k] * A[i][j]; for (i = 0; i < _PB_NI; i++) A[i][j] = A[i][j] - Q[i][k] * R[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NI,NJ,ni,nj); POLYBENCH_2D_ARRAY_DECL(R,DATA_TYPE,NJ,NJ,nj,nj); POLYBENCH_2D_ARRAY_DECL(Q,DATA_TYPE,NI,NJ,ni,nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(R), POLYBENCH_ARRAY(Q)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_gramschmidt (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(R), POLYBENCH_ARRAY(Q)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(R), POLYBENCH_ARRAY(Q))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(R); POLYBENCH_FREE_ARRAY(Q); return 0; }
task-barrier.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char* argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { #pragma omp master { #pragma omp task shared(var) { var++; } // Give other thread time to steal the task. sleep(1); } #pragma omp barrier #pragma omp master { var++; } } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK: DONE
convolution_sgemm_pack1to8_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack1to8_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 16) tmp.create(16 * maxk, inch / 8 + inch % 8, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch / 8 + inch % 8, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator); } else { if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + 
(size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 1, opt.workspace_allocator); } #else // __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 4) tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator); } else { if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (inch >= 8) { if (size >= 2) tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator); } else { if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed 
char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed 
char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = 
tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = 
img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] 
= img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) 
+ i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w1, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%4], #16 \n" // _w0123_l "ld1 {v0.16b}, [%3], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%3], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%3], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot 
v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%3], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%4], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%3], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%3], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%3], #16 \n" // _val891011_h "sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%3], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%4], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%3], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" "sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w1, %w1, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %3, %3, #16 \n" "sub %4, %4, #16 \n" "1: \n" "lsr w4, %w2, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "ld1 {v8.8b, v9.8b}, [%4], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%3], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] 
\n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "and w4, %w2, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 5f \n" "4: \n" "ld1 {v1.8b}, [%4] \n" "ld1 {v0.16b}, [%3] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %3, %3, #16 \n" "add %4, %4, #4 \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn1), "3"(tmpptr), "4"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const 
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); 
int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t _val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); vst1q_s32(outptr0 + 16, _sum4); vst1q_s32(outptr0 + 20, _sum5); vst1q_s32(outptr0 + 24, _sum6); vst1q_s32(outptr0 + 28, _sum7); outptr0 += 32; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const 
signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = 
vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w1, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%3, #128] \n" "prfm pldl1keep, [%4, #256] \n" "lsr w4, %w1, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%4, #512] \n" "add x5, %3, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%3] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%4], #64 \n" "add %3, %3, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%3] \n" "add %3, %3, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%4, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%4, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, 
v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%3] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %3, %3, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%3] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %3, %3, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%4], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%3, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%3, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %3, %3, #64 \n" "sub %4, %4, #64 \n" "1: \n" "and w4, %w1, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%3], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%4], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" 
"smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%3], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "lsr w4, %w2, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.16b}, [%3], #16 \n" "ld1 {v9.16b}, [%4], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, 
v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w2, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v4.8b}, [%3] \n" "ld1 {v6.8b}, [%4] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "add %3, %3, #4 \n" "add %4, %4, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn1), "3"(tmpptr), "4"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } #else // 
__ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); 
int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } #endif // __ARM_FEATURE_DOTPROD int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, 
_val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_s32(outptr0, _sum00); vst1q_s32(outptr0 + 4, _sum10); outptr0 += 8; #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %1, #0 \n" "beq 3f \n" "pld [%3, #256] \n" "lsr r4, %1, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %4, #16 \n" "pld [%4, #128] \n" "mov r6, #32 \n" "pld [%4, #384] \n" "vld1.s8 {d20-d21}, [%4 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%3 :128]! \n" // _val0 _val1 "vld1.s8 {d22-d23}, [%4 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%3, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%4, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%3 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%4 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%3 :128]! 
\n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%4 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%3, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%4, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %3, %3, #32 \n" "sub %4, %4, #64 \n" "1: \n" "and r4, %1, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%3 :128]! \n" // _val "vld1.s8 {d20-d21}, [%4 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%4 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "lsr r4, %2, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d4}, [%3]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%4]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %2, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "vld1.s8 {d4[]}, [%3]! \n" "vld1.s8 {d6[]}, [%3]! 
\n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%4] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %4, %4, #4 \n" "subs r4, r4, #1 \n" "bne 6b \n" "7: \n" "vst1.s32 {d0-d3}, [%0 :128]! \n" : "=r"(outptr0), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn1), "3"(tmpptr), "4"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = 
vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } #endif // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = 
vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_s32(outptr0, _sum0); outptr0 += 4; } } } static void convolution_im2col_sgemm_transform_kernel_pack1to8_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8a-4b-maxk-inch/8a-outch/4b // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82) Mat kernel = _kernel.reshape(maxk, inch, outch); if (inch >= 8) kernel_tm.create(32 * maxk, inch / 8 + inch % 8, outch / 4, 1u); else kernel_tm.create(4 * maxk, inch, outch / 4, 1u); for (int q = 0; q + 3 < outch; q += 4) { signed char* g00 = kernel_tm.channel(q / 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } for (int i = 0; i < 4; i++) { for (int j = 4; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #else for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #endif } } // TODO unroll 4 for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p); g00[0] = k00[k]; g00++; } } } } } static void 
convolution_im2col_sgemm_pack1to8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to8_int8_neon(bottom_im2col, top_blob, kernel, opt); }
parallel_for.h
/*!
 * Copyright (c) 2021 by Contributors
 * \file parallel_for.h
 * \brief Implementation of parallel for loop
 * \author Hyunsu Cho
 */
#ifndef TREELITE_THREADING_UTILS_PARALLEL_FOR_H_
#define TREELITE_THREADING_UTILS_PARALLEL_FOR_H_

#include <treelite/omp.h>
#include <treelite/logging.h>
#include <type_traits>
#include <algorithm>
#include <exception>
#include <mutex>
#include <cstddef>
#include <cstdint>

namespace treelite {
namespace threading_utils {

/*!
 * \brief OMP Exception class catches, saves and rethrows exception from OMP blocks.
 *
 * A C++ exception must not escape an OpenMP structured block, so worker
 * threads record the first exception seen via Run(), and the launching
 * thread re-raises it afterwards via Rethrow().
 */
class OMPException {
 private:
  // exception_ptr member to store the exception (first one thrown wins)
  std::exception_ptr omp_exception_;
  // mutex to be acquired during catch to set the exception_ptr
  std::mutex mutex_;

 public:
  /*!
   * \brief Parallel OMP blocks should be placed within Run to save exception.
   *        Invokes f(params...); any std::exception escaping f is captured
   *        here instead of crossing the OMP region boundary.
   */
  template <typename Function, typename... Parameters>
  void Run(Function f, Parameters... params) {
    try {
      f(params...);
    } catch (std::exception& ex) {
      // Serialize stores so only the first thread's exception is kept.
      std::lock_guard<std::mutex> lock(mutex_);
      if (!omp_exception_) {
        omp_exception_ = std::current_exception();
      }
    }
  }

  /*!
   * \brief Should be called from the main thread to rethrow the saved
   *        exception, if any; no-op when no exception was captured.
   */
  void Rethrow() {
    if (this->omp_exception_) {
      std::rethrow_exception(this->omp_exception_);
    }
  }
};

// Query OpenMP's thread limit; checked to be >= 1 before returning.
inline int OmpGetThreadLimit() {
  int limit = omp_get_thread_limit();
  TREELITE_CHECK_GE(limit, 1) << "Invalid thread limit for OpenMP.";
  return limit;
}

// Maximum usable thread count:
// min(processor count, OpenMP max threads, OpenMP thread limit).
inline int MaxNumThread() {
  return std::min(std::min(omp_get_num_procs(), omp_get_max_threads()), OmpGetThreadLimit());
}

/*!
 * \brief Represent thread configuration, to be used with parallel loops.
 */
struct ThreadConfig {
  std::uint32_t nthread;  // thread count; validated when built via ConfigureThreadConfig()
};

/*!
 * \brief Create thread configuration.
 * @param nthread Number of threads to use. If \<= 0, use all available threads. This value is
 *                validated to ensure that it's in a valid range.
 * @return Thread configuration
 */
inline ThreadConfig ConfigureThreadConfig(int nthread) {
  if (nthread <= 0) {
    // Non-positive request means "use everything available".
    nthread = MaxNumThread();
    TREELITE_CHECK_GE(nthread, 1) << "Invalid number of threads configured in OpenMP";
  } else {
    TREELITE_CHECK_LE(nthread, MaxNumThread())
        << "nthread cannot exceed " << MaxNumThread() << " (configured by OpenMP).";
  }
  return ThreadConfig{static_cast<std::uint32_t>(nthread)};
}

// OpenMP schedule, mirroring the schedule() clause of `omp parallel for`;
// chunk == 0 means "let the runtime choose the chunk size".
struct ParallelSchedule {
  enum {
    kAuto,
    kDynamic,
    kStatic,
    kGuided,
  } sched;
  std::size_t chunk{0};

  ParallelSchedule static Auto() { return ParallelSchedule{kAuto}; }
  ParallelSchedule static Dynamic(std::size_t n = 0) { return ParallelSchedule{kDynamic, n}; }
  ParallelSchedule static Static(std::size_t n = 0) { return ParallelSchedule{kStatic, n}; }
  ParallelSchedule static Guided() { return ParallelSchedule{kGuided}; }
};

/*!
 * \brief Run func(i, thread_id) in parallel for i in [begin, end) with the
 *        given thread count and OpenMP schedule. Exceptions thrown inside
 *        func are captured via OMPException and rethrown on the calling
 *        thread after the loop completes.
 */
template <typename IndexType, typename FuncType>
inline void ParallelFor(IndexType begin, IndexType end, const ThreadConfig& thread_config,
                        ParallelSchedule sched, FuncType func) {
  if (begin == end) {
    return;
  }

#if defined(_MSC_VER)
  // msvc doesn't support unsigned integer as openmp index.
  using OmpInd = std::conditional_t<std::is_signed<IndexType>::value, IndexType, std::int64_t>;
#else
  using OmpInd = IndexType;
#endif

  OMPException exc;
  // Each schedule variant needs its own literal pragma: the schedule()
  // clause cannot be selected at run time, so the loop body is repeated.
  switch (sched.sched) {
  case ParallelSchedule::kAuto: {
#pragma omp parallel for num_threads(thread_config.nthread)
    for (OmpInd i = begin; i < end; ++i) {
      exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
    }
    break;
  }
  case ParallelSchedule::kDynamic: {
    if (sched.chunk == 0) {
      // chunk size unspecified; let the runtime decide
#pragma omp parallel for num_threads(thread_config.nthread) schedule(dynamic)
      for (OmpInd i = begin; i < end; ++i) {
        exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
      }
    } else {
#pragma omp parallel for num_threads(thread_config.nthread) schedule(dynamic, sched.chunk)
      for (OmpInd i = begin; i < end; ++i) {
        exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
      }
    }
    break;
  }
  case ParallelSchedule::kStatic: {
    if (sched.chunk == 0) {
      // chunk size unspecified; let the runtime decide
#pragma omp parallel for num_threads(thread_config.nthread) schedule(static)
      for (OmpInd i = begin; i < end; ++i) {
        exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
      }
    } else {
#pragma omp parallel for num_threads(thread_config.nthread) schedule(static, sched.chunk)
      for (OmpInd i = begin; i < end; ++i) {
        exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
      }
    }
    break;
  }
  case ParallelSchedule::kGuided: {
#pragma omp parallel for num_threads(thread_config.nthread) schedule(guided)
    for (OmpInd i = begin; i < end; ++i) {
      exc.Run(func, static_cast<IndexType>(i), omp_get_thread_num());
    }
    break;
  }
  }
  exc.Rethrow();
}

}  // namespace threading_utils
}  // namespace treelite

#endif  // TREELITE_THREADING_UTILS_PARALLEL_FOR_H_
GB_binop__cmplx_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp64) // C=scalar+B GB (_bind1st__cmplx_fp64) // C=scalar+B' GB (_bind1st_tran__cmplx_fp64) // C=A+scalar GB (_bind2nd__cmplx_fp64) // C=A'+scalar GB (_bind2nd_tran__cmplx_fp64) // C type: GxB_FC64_t // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = GxB_CMPLX (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GxB_CMPLX (GBX (Ax, pA, A_iso), 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GxB_CMPLX (GBX (Bx, pB, B_iso), 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GxB_CMPLX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CMPLX || GxB_NO_FP64 || GxB_NO_CMPLX_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__cmplx_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__cmplx_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__cmplx_fp64) ( GrB_Matrix 
C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this chunk begins inside a generated eWiseMult kernel whose
// opening (and the matching #if) lies before this view; the lines below up to
// the first closing brace are that function's tail, kept verbatim.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Generated kernel: the whole body is supplied by the included template, which
// expects the GB_* type/operator macros defined earlier in this file.
GrB_Info GB (_AemultB_04__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__cmplx_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Builds complex entries Cx[p] = GxB_CMPLX (x, Bx[p]) from a real (fp64)
// matrix B and a bound real scalar x.  Entries absent from the bitmap Bb are
// skipped.
GrB_Info GB (_bind1st__cmplx_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GxB_CMPLX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx[p] = GxB_CMPLX (Ax[p], y) with the scalar bound as
// the imaginary part.
GrB_Info GB (_bind2nd__cmplx_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GxB_CMPLX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GxB_CMPLX (x, aij) ;              \
}

GrB_Info GB (_bind1st_tran__cmplx_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // NOTE(review): this "restore" redefines GB_ATYPE to the same type it was
    // just set to (generated code) — verify against the generator if GB_ATYPE
    // is expected to revert to the complex type here.
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GxB_CMPLX (aij, y) ;              \
}

GrB_Info GB (_bind2nd_tran__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ParallelEndLink.c
int x; int main() { #pragma omp parallel { int x; 123; } #pragma omp parallel { 123; } }
3dMath.h
/* Public Domain / CC0 C99 Vector Math Library */ #ifndef CHAD_MATH_H #define CHAD_MATH_H /* Default behavior- compatibility. */ #ifndef CHAD_MATH_NO_ALIGN #define CHAD_MATH_NO_ALIGN #endif #ifdef __TINYC__ #define CHAD_MATH_NO_ALIGN #endif #ifndef CHAD_MATH_NO_ALIGN #include <stdalign.h> #define CHAD_ALIGN alignas(16) #warning "Chad math library compiling with alignas of 16, malloc and realloc MUST return 16-byte-aligned pointers." #else #define CHAD_ALIGN /*a comment*/ #endif #include <math.h> #include <string.h> typedef float f_; typedef unsigned int uint; #define MAX(x,y) (x>y?x:y) #define MIN(x,y) (x<y?x:y) typedef struct {CHAD_ALIGN f_ d[3];} vec3; typedef struct {CHAD_ALIGN int d[3];} ivec3; typedef struct {CHAD_ALIGN f_ d[4];} vec4; typedef struct {CHAD_ALIGN f_ d[16];} mat4; /*Collision detection These Algorithms return the penetration vector into the shape in the first argument With depth of penetration in element 4 if depth of penetration is zero or lower then there is no penetration. */ typedef struct{ vec4 c; vec3 e; }aabb; typedef aabb colshape; /*c.d[3] determines if it's a sphere or box. 
0 or less = box, greater than 0 = sphere*/ static inline mat4 scalemat4( vec4 s){ mat4 ret; for(int i = 1; i < 16; i++) ret.d[i]= 0.0; ret.d[0*4 + 0] = s.d[0]; ret.d[1*4 + 1] = s.d[1]; ret.d[2*4 + 2] = s.d[2]; ret.d[3*4 + 3] = s.d[3]; return ret; } static inline int invmat4( mat4 m, mat4* invOut) /*returns 1 if successful*/ { mat4 inv; f_ det; int i; inv.d[0] = m.d[5] * m.d[10] * m.d[15] - m.d[5] * m.d[11] * m.d[14] - m.d[9] * m.d[6] * m.d[15] + m.d[9] * m.d[7] * m.d[14] + m.d[13] * m.d[6] * m.d[11] - m.d[13] * m.d[7] * m.d[10]; inv.d[4] = -m.d[4] * m.d[10] * m.d[15] + m.d[4] * m.d[11] * m.d[14] + m.d[8] * m.d[6] * m.d[15] - m.d[8] * m.d[7] * m.d[14] - m.d[12] * m.d[6] * m.d[11] + m.d[12] * m.d[7] * m.d[10]; inv.d[8] = m.d[4] * m.d[9] * m.d[15] - m.d[4] * m.d[11] * m.d[13] - m.d[8] * m.d[5] * m.d[15] + m.d[8] * m.d[7] * m.d[13] + m.d[12] * m.d[5] * m.d[11] - m.d[12] * m.d[7] * m.d[9]; inv.d[12] = -m.d[4] * m.d[9] * m.d[14] + m.d[4] * m.d[10] * m.d[13] + m.d[8] * m.d[5] * m.d[14] - m.d[8] * m.d[6] * m.d[13] - m.d[12] * m.d[5] * m.d[10] + m.d[12] * m.d[6] * m.d[9]; inv.d[1] = -m.d[1] * m.d[10] * m.d[15] + m.d[1] * m.d[11] * m.d[14] + m.d[9] * m.d[2] * m.d[15] - m.d[9] * m.d[3] * m.d[14] - m.d[13] * m.d[2] * m.d[11] + m.d[13] * m.d[3] * m.d[10]; inv.d[5] = m.d[0] * m.d[10] * m.d[15] - m.d[0] * m.d[11] * m.d[14] - m.d[8] * m.d[2] * m.d[15] + m.d[8] * m.d[3] * m.d[14] + m.d[12] * m.d[2] * m.d[11] - m.d[12] * m.d[3] * m.d[10]; inv.d[9] = -m.d[0] * m.d[9] * m.d[15] + m.d[0] * m.d[11] * m.d[13] + m.d[8] * m.d[1] * m.d[15] - m.d[8] * m.d[3] * m.d[13] - m.d[12] * m.d[1] * m.d[11] + m.d[12] * m.d[3] * m.d[9]; inv.d[13] = m.d[0] * m.d[9] * m.d[14] - m.d[0] * m.d[10] * m.d[13] - m.d[8] * m.d[1] * m.d[14] + m.d[8] * m.d[2] * m.d[13] + m.d[12] * m.d[1] * m.d[10] - m.d[12] * m.d[2] * m.d[9]; inv.d[2] = m.d[1] * m.d[6] * m.d[15] - m.d[1] * m.d[7] * m.d[14] - m.d[5] * m.d[2] * m.d[15] + m.d[5] * m.d[3] * m.d[14] + m.d[13] * m.d[2] * m.d[7] - m.d[13] * m.d[3] * m.d[6]; inv.d[6] = 
-m.d[0] * m.d[6] * m.d[15] + m.d[0] * m.d[7] * m.d[14] + m.d[4] * m.d[2] * m.d[15] - m.d[4] * m.d[3] * m.d[14] - m.d[12] * m.d[2] * m.d[7] + m.d[12] * m.d[3] * m.d[6]; inv.d[10] = m.d[0] * m.d[5] * m.d[15] - m.d[0] * m.d[7] * m.d[13] - m.d[4] * m.d[1] * m.d[15] + m.d[4] * m.d[3] * m.d[13] + m.d[12] * m.d[1] * m.d[7] - m.d[12] * m.d[3] * m.d[5]; inv.d[14] = -m.d[0] * m.d[5] * m.d[14] + m.d[0] * m.d[6] * m.d[13] + m.d[4] * m.d[1] * m.d[14] - m.d[4] * m.d[2] * m.d[13] - m.d[12] * m.d[1] * m.d[6] + m.d[12] * m.d[2] * m.d[5]; inv.d[3] = -m.d[1] * m.d[6] * m.d[11] + m.d[1] * m.d[7] * m.d[10] + m.d[5] * m.d[2] * m.d[11] - m.d[5] * m.d[3] * m.d[10] - m.d[9] * m.d[2] * m.d[7] + m.d[9] * m.d[3] * m.d[6]; inv.d[7] = m.d[0] * m.d[6] * m.d[11] - m.d[0] * m.d[7] * m.d[10] - m.d[4] * m.d[2] * m.d[11] + m.d[4] * m.d[3] * m.d[10] + m.d[8] * m.d[2] * m.d[7] - m.d[8] * m.d[3] * m.d[6]; inv.d[11] = -m.d[0] * m.d[5] * m.d[11] + m.d[0] * m.d[7] * m.d[9] + m.d[4] * m.d[1] * m.d[11] - m.d[4] * m.d[3] * m.d[9] - m.d[8] * m.d[1] * m.d[7] + m.d[8] * m.d[3] * m.d[5]; inv.d[15] = m.d[0] * m.d[5] * m.d[10] - m.d[0] * m.d[6] * m.d[9] - m.d[4] * m.d[1] * m.d[10] + m.d[4] * m.d[2] * m.d[9] + m.d[8] * m.d[1] * m.d[6] - m.d[8] * m.d[2] * m.d[5]; det = m.d[0] * inv.d[0] + m.d[1] * inv.d[4] + m.d[2] * inv.d[8] + m.d[3] * inv.d[12]; if (det == 0) return 0; det = 1.0 / det; for (i = 0; i < 16; i++) invOut->d[i] = inv.d[i] * det; return 1; } static inline mat4 perspective( f_ fov, f_ aspect, f_ near, f_ far){ mat4 ret; f_ D2R = 3.14159265358979323 / 180.0; f_ yScale = 1.0/tanf(D2R * fov/2); f_ xScale = yScale/aspect; f_ nearmfar = near-far; ret.d[0*4+0] = xScale; ret.d[0*4+1]=0; ret.d[0*4+2]=0; ret.d[0*4+3]=0; ret.d[1*4+0]=0; ret.d[1*4+1]=yScale;ret.d[1*4+2]=0; ret.d[1*4+3]=0; ret.d[2*4+0]=0; ret.d[2*4+1]=0; ret.d[2*4+2]=(far+near)/nearmfar;ret.d[2*4+3]=-1; ret.d[3*4+0]=0; ret.d[3*4+1]=0; ret.d[3*4+2]=2*far*near/nearmfar;ret.d[3*4+3]=0; /* ret.d[0*4+0] = xScale; ret.d[0*4+1]=0; ret.d[0*4+2]=0; 
ret.d[0*4+3]=0; ret.d[1*4+0]=0; ret.d[1*4+1]=yScale;ret.d[1*4+2]=0; ret.d[1*4+3]=0; ret.d[2*4+0]=0; ret.d[2*4+1]=0; ret.d[2*4+2]=(far+near)/nearmfar; ret.d[2*4+3]=2*far*near/nearmfar; ret.d[3*4+0]=0; ret.d[3*4+1]=0; ret.d[3*4+2]=-1; ret.d[3*4+3]=0; */ return ret; } static inline vec3 viewport( uint xdim, uint ydim, vec3 input){ input.d[0] += 1; input.d[1] += 1; input.d[0] *= (f_)xdim / 2.0; input.d[1] *= (f_)ydim / 2.0; input.d[2] = (input.d[2])/2.0; return input; } static inline mat4 rotate( vec3 rotation){ f_ a = rotation.d[0]; f_ b = rotation.d[1]; f_ c = rotation.d[2]; mat4 rm; rm.d[0*4 + 0] = cosf(a)*cosf(b); rm.d[1*4 + 0] = sinf(a)*cosf(b); rm.d[2*4 + 0] = -sinf(b); rm.d[0*4 + 1] = cosf(a)*sinf(b)*sinf(c)-sinf(a)*cosf(c); rm.d[1*4 + 1] = sinf(a)*sinf(b)*sinf(c)+cosf(a)*cosf(c); rm.d[2*4 + 1] = cosf(b)*sinf(c); rm.d[0*4 + 2] = cosf(a)*sinf(b)*cosf(c)+sinf(a)*sinf(c); rm.d[1*4 + 2] = sinf(a)*sinf(b)*cosf(c)-cosf(a)*sinf(c); rm.d[2*4 + 2] = cosf(b)*cosf(c); rm.d[0*4 + 3] = 0; rm.d[1*4 + 3] = 0; rm.d[2*4 + 3] = 0; rm.d[3*4 + 3] = 1; /*the bottom right corner of the matrix.*/ rm.d[3*4 + 0] = 0; rm.d[3*4 + 1] = 0; rm.d[3*4 + 2] = 0; return rm; } static inline f_ clampf( f_ a, f_ min, f_ max){ if(a<min) return min; if(a>max) return max; return a; } static inline f_ lengthv3( vec3 a){ return sqrtf(a.d[0] * a.d[0] + a.d[1] * a.d[1] + a.d[2] * a.d[2]); } static inline f_ lengthv4( vec4 a){ return sqrtf(a.d[0] * a.d[0] + a.d[1] * a.d[1] + a.d[2] * a.d[2] + a.d[3] * a.d[3]); } static inline vec3 multvec3( vec3 a, vec3 b){ return (vec3){ .d[0]=a.d[0]*b.d[0], .d[1]=a.d[1]*b.d[1], .d[2]=a.d[2]*b.d[2] }; } static inline vec4 multvec4( vec4 a, vec4 b){ return (vec4){ .d[0]=a.d[0]*b.d[0], .d[1]=a.d[1]*b.d[1], .d[2]=a.d[2]*b.d[2], .d[3]=a.d[3]*b.d[3] }; } static inline vec3 clampvec3( vec3 a, vec3 min, vec3 max){ vec3 ret; ret.d[0] = clampf(a.d[0],min.d[0],max.d[0]); ret.d[1] = clampf(a.d[1],min.d[1],max.d[1]); ret.d[2] = clampf(a.d[2],min.d[2],max.d[2]); return ret; } static 
inline vec4 clampvec4( vec4 a, vec4 min, vec4 max){ vec4 ret; ret.d[0] = clampf(a.d[0],min.d[0],max.d[0]); ret.d[1] = clampf(a.d[1],min.d[1],max.d[1]); ret.d[2] = clampf(a.d[2],min.d[2],max.d[2]); ret.d[3] = clampf(a.d[3],min.d[3],max.d[3]); return ret; } static inline f_ dotv3( vec3 a, vec3 b){ return a.d[0] * b.d[0] + a.d[1] * b.d[1] + a.d[2] * b.d[2]; } static inline f_ dotv4( vec4 a, vec4 b){ return a.d[0] * b.d[0] + a.d[1] * b.d[1] + a.d[2] * b.d[2] + a.d[3] * b.d[3]; } static inline vec4 getrow( mat4 a, uint index){ return (vec4){ .d[0]=a.d[index], .d[1]=a.d[4+index], .d[2]=a.d[8+index], .d[3]=a.d[12+index] }; } static inline mat4 swapRowColumnMajor( mat4 in){ mat4 result; vec4 t; int i = 0; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4); return result; } static inline vec4 getcol( mat4 a, uint index){ return (vec4){ .d[0]=a.d[index*4], .d[1]=a.d[index*4+1], .d[2]=a.d[index*4+2], .d[3]=a.d[index*4+3] }; } static inline mat4 multm4( mat4 a, mat4 b){ mat4 ret; #pragma omp simd for(int i = 0; i < 4; i++) for(int j = 0; j < 4; j++) ret.d[i*4 + j] = dotv4( /*j is the ROW of the target, i is the COLUMN.*/ getrow(a, j), /*we retrieve the same ROW as our ROW INDEX.*/ getcol(b, i) /*we retrieve the same COLUMN as our COLUMN INDEX.*/ ); return ret; } static inline vec4 mat4xvec4( mat4 t, vec4 v){ vec4 vr; /* Getting a ROW of the matrix and dotting it with the COLUMN VECTOR to get ONE ROW of the output COLUMN VECTOR- one float.*/ vr.d[0] = t.d[0*4] * v.d[0] + t.d[1*4] * v.d[1] + t.d[2*4] * v.d[2] + t.d[3*4] * v.d[3]; vr.d[1] = t.d[0*4+1] * v.d[0] + t.d[1*4+1] * v.d[1] + t.d[2*4+1] * v.d[2] + t.d[3*4+1] * v.d[3]; vr.d[2] = t.d[0*4+2] * v.d[0] + t.d[1*4+2] * v.d[1] + t.d[2*4+2] * v.d[2] + t.d[3*4+2] * v.d[3]; vr.d[3] = t.d[0*4+3] * v.d[0] + t.d[1*4+3] * v.d[1] + t.d[2*4+3] * v.d[2] + t.d[3*4+3] * v.d[3]; return 
vr; } static inline vec3 crossv3( vec3 a, vec3 b){ vec3 retval; retval.d[0] = a.d[1] * b.d[2] - a.d[2] * b.d[1]; retval.d[1] = a.d[2] * b.d[0] - a.d[0] * b.d[2]; retval.d[2] = a.d[0] * b.d[1] - a.d[1] * b.d[0]; return retval; } static inline vec3 scalev3( f_ s, vec3 i){i.d[0] *= s; i.d[1] *= s; i.d[2] *= s; return i;} static inline vec4 scalev4( f_ s, vec4 i){i.d[0] *= s; i.d[1] *= s; i.d[2] *= s;i.d[3] *= s; return i;} static inline vec3 normalizev3( vec3 a){ if(lengthv3(a)==0) return (vec3){.d[0]=0.0,.d[1]=0.0,.d[2]=1.0}; return scalev3(1.0/lengthv3(a), a); } static inline vec4 normalizev4( vec4 a){ if(lengthv4(a)==0) return (vec4){.d[0]=0.0,.d[1]=0.0,.d[2]=1.0,.d[3]=0.0}; return scalev4(1.0/lengthv4(a), a); } static inline vec3 addv3( vec3 aa, vec3 b){ vec3 a = aa; a.d[0] += b.d[0]; a.d[1] += b.d[1]; a.d[2] += b.d[2]; return a; } static inline vec3 rotatev3( vec3 in, vec3 axis, f_ ang){ vec3 t1 = scalev3(cosf(ang),in); vec3 t2 = scalev3(sinf(ang),crossv3(axis,in)); vec3 t3 = scalev3((1-cosf(ang))*dotv3(axis,in),axis); return addv3(t1,addv3(t2,t3)); } static inline vec4 addv4( vec4 aa, vec4 b){ vec4 a = aa; a.d[0] += b.d[0]; a.d[1] += b.d[1]; a.d[2] += b.d[2]; a.d[3] += b.d[3]; return a; } static inline vec3 subv3( vec3 a, vec3 b){ return addv3(a,scalev3(-1,b)); } static inline mat4 identitymat4(){ return scalemat4( (vec4){.d[0]=1.0,.d[1]=1.0,.d[2]=1.0,.d[3]=1.0} ); } static inline mat4 translate( vec3 t){ mat4 tm = identitymat4(); tm.d[3*4+0] = t.d[0]; tm.d[3*4+1] = t.d[1]; tm.d[3*4+2] = t.d[2]; return tm; } static inline vec4 subv4( vec4 a, vec4 b){ return addv4(a,scalev4(-1,b)); } static inline vec3 reflect( vec3 in, vec3 norm){ return addv3(in, scalev3(-2.0*dotv3(norm, in), norm ) ); } static inline vec4 upv3( vec3 in, f_ w){ return (vec4){ .d[0]=in.d[0], .d[1]=in.d[1], .d[2]=in.d[2], .d[3]=w }; } static inline vec3 downv4( vec4 in){ return (vec3){ .d[0]=in.d[0], .d[1]=in.d[1], .d[2]=in.d[2] }; } static inline mat4 lookAt( vec3 eye, vec3 at, vec3 up){ mat4 cw 
= identitymat4(); vec3 zaxis = normalizev3(subv3(at,eye)); vec3 xaxis = normalizev3(crossv3(zaxis,up)); vec3 yaxis = crossv3(xaxis, zaxis); zaxis = scalev3(-1,zaxis); cw.d[0*4+0] = xaxis.d[0]; cw.d[1*4+0] = xaxis.d[1]; cw.d[2*4+0] = xaxis.d[2]; cw.d[3*4+0] = -dotv3(xaxis,eye); cw.d[0*4+1] = yaxis.d[0]; cw.d[1*4+1] = yaxis.d[1]; cw.d[2*4+1] = yaxis.d[2]; cw.d[3*4+1] = -dotv3(yaxis,eye); cw.d[0*4+2] = zaxis.d[0]; cw.d[1*4+2] = zaxis.d[1]; cw.d[2*4+2] = zaxis.d[2]; cw.d[3*4+2] = -dotv3(zaxis,eye); cw.d[0*4+3] = 0; cw.d[1*4+3] = 0; cw.d[2*4+3] = 0; cw.d[3*4+3] = 1; return cw; } /* Collision detection These Algorithms return the penetration vector into the shape in the first argument With depth of penetration in element 4 if depth of penetration is zero or lower then there is no penetration. */ static inline vec4 spherevsphere( vec4 s1, vec4 s2){ vec4 ret; vec3 diff = subv3( downv4(s2), downv4(s1) ); f_ lv3 = lengthv3(diff); f_ l = (s1.d[3] + s2.d[3]-lv3); if(l < 0 || lv3 == 0) { ret.d[3] = 0;return ret; } ret = upv3( scalev3( l/lv3,diff ) ,l ); return ret; } static inline int boxvboxbool (aabb b1, aabb b2){ vec3 sumextents = addv3(b1.e,b2.e); vec3 b1c = downv4(b1.c); vec3 b2c = downv4(b2.c); if( !( (fabs(b1c.d[0] - b2c.d[0]) <= sumextents.d[0]) && (fabs(b1c.d[1] - b2c.d[1]) <= sumextents.d[1]) && (fabs(b1c.d[2] - b2c.d[2]) <= sumextents.d[2]) ) ){ return 0; } return 1; } static inline vec4 boxvbox( aabb b1, aabb b2){ /*Just points along the minimum separating axis, Nothing fancy.*/ vec4 ret = (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=0 }; vec3 sumextents = addv3(b1.e,b2.e); vec3 b1c = downv4(b1.c); vec3 b2c = downv4(b2.c); vec3 b1min = subv3(b1c,b1.e); vec3 b2min = subv3(b2c,b2.e); vec3 b1max = addv3(b1c,b1.e); vec3 b2max = addv3(b2c,b2.e); if( !( (fabs(b1c.d[0] - b2c.d[0]) <= sumextents.d[0]) && (fabs(b1c.d[1] - b2c.d[1]) <= sumextents.d[1]) && (fabs(b1c.d[2] - b2c.d[2]) <= sumextents.d[2]) ) ){ return ret; } vec3 axispen[2]; axispen[0] = subv3(b1max,b2min); axispen[1] 
= subv3(b1min,b2max); ret.d[3] = fabs(axispen[0].d[0]); ret.d[0] = axispen[0].d[0]; for(int i = 1; i < 6; i++){ if(fabs(axispen[i/3].d[i%3]) < fabs(ret.d[3])){ ret = (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=(axispen[i/3].d[i%3]) }; ret.d[i%3] = ret.d[3]; ret.d[3] = fabs(ret.d[3]); } } return ret; } static inline vec3 closestpointAABB( aabb b, vec3 p){ vec3 b1min = subv3(downv4(b.c),b.e); vec3 b1max = addv3(downv4(b.c),b.e); return clampvec3(p,b1min,b1max); } static inline vec4 spherevaabb( vec4 sph, aabb box){ vec4 ret; vec3 p = closestpointAABB(box,downv4(sph)); vec3 v = subv3(p,downv4(sph)); f_ d2 = dotv3(v,v); if(d2 <= sph.d[3] * sph.d[3]){ f_ len = lengthv3(v); f_ diff = (sph.d[3] - len); if(len > 0){ f_ factor = diff/len; vec3 bruh = scalev3(factor, v); ret = upv3(bruh, diff); return ret; } else { aabb virt; virt.c = sph; virt.e.d[0] = sph.d[3]; virt.e.d[1] = sph.d[3]; virt.e.d[2] = sph.d[3]; return boxvbox(virt,box); } } else return (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=0 }; } /*END Math_Library.h~~~~~~~~~~~~~~~~~~~~*/ #endif
omp_smithW-v7-adaptive.c
/********************************************************************************* * Smith–Waterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * * Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode * gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run * Execution: ./omp_smithW <number_of_col> <number_of_rows> * * Updated by C. Liao, Jan 2nd, 2019 *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <time.h> #include <assert.h> #include <stdbool.h> // C99 does not support the boolean data type #include "parameters.h" /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? 
a : b) // #define DEBUG /* End of Helpers */ #ifndef _OPENMP #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double omp_get_wtime() { return time_stamp(); } #endif /*-------------------------------------------------------------------- * Functions Prototypes */ #pragma omp declare target //Defines size of strings to be compared long long int m = 8 ; //Columns - Size of string a long long int n = 9; //Lines - Size of string b int gapScore = -2; //Defines scores int matchScore = 3; int missmatchScore = -3; //Strings over the Alphabet Sigma char *a, *b; int matchMissmatchScore(long long int i, long long int j); void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos); #pragma omp end declare target // without omp critical: how to conditionalize it? void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos); void backtrack(int* P, long long int maxPos); void printMatrix(int* matrix); void printPredecessorMatrix(int* matrix); void generate(void); long long int nElement(long long int i); void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ bool useBuiltInData=true; int MEDIUM=10240; int LARGE=20480; // max 46340 for GPU of 16GB Device memory // the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s. 
/* End of global variables */ /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { // thread_count is no longer used int thread_count; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) { printf ("Usage: %s m n\n", argv[0]); printf ("Using built-in data for testing ..\n"); } printf("Problem size: Matrix[%lld][%lld], Medium=%d Large=%d\n", n, m, MEDIUM, LARGE); //#endif //Allocates a and b a = (char*) malloc(m * sizeof(char)); // printf ("debug: a's address=%p\n", a); b = (char*) malloc(n * sizeof(char)); // printf ("debug: b's address=%p\n", b); //Because now we have zeros m++; n++; //Allocates similarity matrix H int *H; H = (int *) calloc(m * n, sizeof(int)); // printf ("debug: H's address=%p\n", H); //Allocates predecessor matrix P int *P; P = (int *)calloc(m * n, sizeof(int)); // printf ("debug: P's address=%p\n", P); unsigned long long sz = (m+n +2*m*n)*sizeof(int)/1024/1024; if (sz>=1024) printf("Total memory footprint is:%llu GB\n", sz/1024) ; else printf("Total memory footprint is:%llu MB\n", sz) ; if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // OBS: m=11 n=7 // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } //Start position for 
backtrack long long int maxPos = 0; //Calculates the similarity matrix long long int i, j; // The way to generate all wavefront is to go through the top edge elements // starting from the left top of the matrix, go to the bottom top -> down, then left->right // total top edge element count = dim1_size + dim2_size -1 //Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; #ifdef DEBUG printf("nDiag=%lld\n", nDiag); printf("Number of wavefront lines and their first element positions:\n"); #endif #ifdef _OPENMP #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); printf ("Using %d out of max %d threads...\n", thread_count, omp_get_max_threads()); } } // detect GPU support int runningOnGPU = 0; printf ("The number of target devices =%d\n", omp_get_num_devices()); /* Test if GPU is available using OpenMP4.5 */ #pragma omp target map(from:runningOnGPU) { // This function returns true if currently running on the host device, false otherwise. if (!omp_is_initial_device()) runningOnGPU = 1; } /* If still running on CPU, GPU must not be available */ if (runningOnGPU == 1) printf("### Able to use the GPU! ### \n"); else printf("### Unable to use the GPU, using CPU! ###\n"); #endif //Gets Initial time double initialTime = omp_get_wtime(); // mistake: element count, not byte size!! 
// int asz= m*n*sizeof(int); int asz= m*n; // choice 2: map data before the outer loop //#pragma omp target map (to:a[0:m], b[0:n], nDiag, m,n,gapScore, matchScore, missmatchScore) map(tofrom: H[0:asz], P[0:asz], maxPos) // #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i) { for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding { long long int nEle, si, sj; // report at most 5 times for each diagonal line long long interval = nDiag/5; // nEle = nElement(i); //---------------inlined ------------ if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing nEle = i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size nEle = min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); nEle = 2 * min - i + llabs(m - n) - 2; } //calcFirstDiagElement(i, &si, &sj); //------------inlined--------------------- // Calculate the first element of diagonal if (i < n) { // smaller than row count si = i; sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix si = n - 1; // i is fixed sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } // serial version: 0 to < medium: small data set if (nEle< MEDIUM) { if (i%interval==0) printf ("Serial version is activated since the diagonal element count %lld is less than MEDIUM %d\n", nEle, MEDIUM); for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal similarityScore2(ai, aj, H, P, &maxPos); // a specialized version without a critical section used inside } } else if (nEle<LARGE) // omp cpu version: medium to large: medium data set { if 
(i%interval==0) printf ("OpenMP CPU version is activated since the diagonal element count %lld is less than LARGE %d\n", nEle, LARGE); #pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside } } else // omp gpu version: large data set //-------------------------------------- { if (i%interval==0) printf ("OpenMP GPU version is activated since the diagonal element count %lld >= LARGE %d\n", nEle, LARGE); // choice 1: map data before the inner loop #pragma omp target map (to:a[0:m], b[0:n], nEle, m,n,gapScore, matchScore, missmatchScore, si, sj) map(tofrom: H[0:asz], P[0:asz], maxPos) #pragma omp parallel for default(none) private(j) shared (a,b, nEle, m, n, gapScore, matchScore, missmatchScore, si, sj, H, P, maxPos) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal ///------------inlined ------------------------------------------ // similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside { int up, left, diag; //Stores index of element long long int index = m * ai + aj; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal int t_mms; if (a[aj - 1] == b[ai - 1]) t_mms = matchScore; else t_mms = missmatchScore; diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j); // degug here // return; //Calculates the maximum int max = NONE; int pred = NONE; if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity 
and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[maxPos]) { #pragma omp critical maxPos = index; } } // --------------------------------------------------------------- } } } // for end nDiag } // end omp parallel double finalTime = omp_get_wtime(); printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime); initialTime = omp_get_wtime(); backtrack(P, maxPos); finalTime = omp_get_wtime(); //Gets backtrack time finalTime = omp_get_wtime(); printf("Elapsed time for backtracking: %f\n", finalTime - initialTime); #ifdef DEBUG printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); #endif if (useBuiltInData) { printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false"); assert (H[n*m-1]==7); } //Frees similarity matrixes free(H); free(P); //Frees input arrays free(a); free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal's elements * i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored. */ long long int nElement(long long int i) { if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size return min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); return 2 * min - i + llabs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement: expect valid i value is from 1 to nDiag. 
since the first one is 0 padding * Purpose: Calculate the position of (si, sj)-element * n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { // smaller than row count *si = i; *sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix *si = n - 1; // i is fixed *sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } } /* // understanding the calculation by an example n =6 // row m =2 // col padded scoring matrix n=7 m=3 0 1 2 ------- 0 x x x 1 x x x 2 x x x 3 x x x 4 x x x 5 x x x 6 x x x We should peel off top row and left column since they are the padding the remaining 6x2 sub matrix is what is interesting for us Now find the number of wavefront lines and their first element's position in the scoring matrix total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1 We use the left most element in each wavefront line as its first element. Then we have the first elements like (1,1), (2,1) (3,1) .. 
(6,1) (6,2) */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j) * int *P; the predecessor array,storing which of the three elements is picked with max value */ #pragma omp declare target void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal int t_mms; if (a[j - 1] == b[i - 1]) t_mms = matchScore; else t_mms = missmatchScore; diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j); // degug here // return; //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { #pragma omp critical *maxPos = index; } } /* End of similarityScore */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ int matchMissmatchScore(long long int i, long long int j) { if (a[j - 1] == b[i - 1]) return matchScore; else return missmatchScore; } /* End of matchMissmatchScore */ #pragma omp end declare target void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int 
up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal diag = H[index - m - 1] + matchMissmatchScore(i, j); //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { *maxPos = index; } } /* End of similarityScore2 */ /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ void backtrack(int* P, long long int maxPos) { //hold maxPos value long long int predPos; //backtrack from maxPos to startPos = 0 do { if (P[maxPos] == DIAGONAL) predPos = maxPos - m - 1; else if (P[maxPos] == UP) predPos = maxPos - m; else if (P[maxPos] == LEFT) predPos = maxPos - 1; P[maxPos] *= PATH; maxPos = predPos; } while (P[maxPos] != NONE); } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ 
/*-------------------------------------------------------------------- * Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(int* matrix) { long long int i, j, index; printf(" "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if (matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
_lensquest_cxx_old.h
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cassert>
#include <omp.h>
#include <healpix_map.h>
#include <alm.h>
#include <alm_healpix_tools.h>
#include <libsharp/sharp_cxx.h>
#include <powspec.h>
#include <datatypes.h>
#include "wignerSymbols-cpp.cpp"
#include "kernels.cpp"
#include <Python.h>

// Fill f[spec][l1][l3] with the quadratic-estimator weight functions for
// multipole L. num_spec==1 fills TT only; num_spec==5 fills tt/te/ee/tb/eb.
// wcl holds the theory spectra (tt/tg/gg/cc accessors); compF_phi (from
// kernels.cpp) supplies the spin-0 (Fz) and spin-2 (F) geometric factors.
// Parity-even (l1+L+l3 even) pairs feed TT/TE/EE; parity-odd pairs feed TB/EB.
void computef(std::vector< std::vector< std::vector<double> > >& f, size_t L, PowSpec& wcl, size_t lminCMB, size_t lmaxCMB, int num_spec){
    std::vector< std::vector<double> > F;
    if (num_spec==5) {
        // spin-2 factor, only needed when polarization estimators are requested
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_phi(F, L, 2, lmaxCMB+1);
    }
    // spin-0 factor (always needed for TT)
    std::vector< std::vector<double> > Fz(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    compF_phi(Fz, L, 0, lmaxCMB+1);
    if (num_spec==1) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                if ((l1+L+l3)%2==0) {
                    f[tt][l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                }
                else {
                    f[tt][l1][l3]=0.0;
                }
            }
        }
    }
    else if (num_spec==5) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                if ((l1+L+l3)%2==0) {
                    f[tt][l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                    f[te][l1][l3]=wcl.tg(l1)*F[l3][l1]+wcl.tg(l3)*Fz[l1][l3];
                    f[ee][l1][l3]=wcl.gg(l1)*F[l3][l1]+wcl.gg(l3)*F[l1][l3];
                    f[tb][l1][l3]=0.0;
                    f[eb][l1][l3]=0.0;
                }
                else {
                    f[tt][l1][l3]=0.0;
                    f[te][l1][l3]=0.0;
                    f[ee][l1][l3]=0.0;
                    f[tb][l1][l3]=-wcl.tg(l1)*F[l3][l1];
                    f[eb][l1][l3]=-wcl.gg(l1)*F[l3][l1]-wcl.cc(l3)*F[l1][l3];
                }
            }
        }
    }
    // any other num_spec is unsupported
    else std::cout << "I don't know what to do, yet" << std::endl;
}

// Single-spectrum variant of computef: fills the 2-D table f[l1][l3] for one
// estimator 'spec' (tt/te/ee/tb/eb). Only the geometric factors actually used
// by that spectrum are computed (spin-0 for spec<=te, spin-2 for spec>=te).
void computef(int spec, std::vector< std::vector<double> > & f, size_t L, PowSpec& wcl, size_t lminCMB, size_t lmaxCMB){
    std::vector< std::vector<double> > F,Fz;
    if (spec>=te) {
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_phi(F, L, 2, lmaxCMB+1);
    }
    if (spec<=te) {
        Fz=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_phi(Fz, L, 0, lmaxCMB+1);
    }
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
            if ((l1+L+l3)%2==0) {
                if (spec==tt) f[l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                else if (spec==te) f[l1][l3]=wcl.tg(l1)*F[l3][l1]+wcl.tg(l3)*Fz[l1][l3];
                else if (spec==ee) f[l1][l3]=wcl.gg(l1)*F[l3][l1]+wcl.gg(l3)*F[l1][l3];
                else if (spec==tb) f[l1][l3]=0.0;
                else if (spec==eb) f[l1][l3]=0.0;
            }
            else {
                if (spec==tt) f[l1][l3]=0.0;
                else if (spec==te) f[l1][l3]=0.0;
                else if (spec==ee) f[l1][l3]=0.0;
                else if (spec==tb) f[l1][l3]=-wcl.tg(l1)*F[l3][l1];
                else if (spec==eb) f[l1][l3]=-wcl.gg(l1)*F[l3][l1]-wcl.cc(l3)*F[l1][l3];
            }
        }
    }
}

// Noise counterpart of computef: weight functions built from compF_2_noise.
// No theory spectra enter and there is no parity split — every (l1,l3) pair
// contributes.
void computef_noise(std::vector< std::vector< std::vector<double> > >& f, size_t L, size_t lminCMB, size_t lmaxCMB, int num_spec){
    std::vector< std::vector<double> > F;
    if (num_spec==5) {
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_noise(F, L, 2, lmaxCMB+1);
    }
    std::vector< std::vector<double> > Fz(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    compF_2_noise(Fz, L, 0, lmaxCMB+1);
    if (num_spec==1) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                f[tt][l1][l3]=Fz[l1][l3];
            }
        }
    }
    else if (num_spec==5) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                f[tt][l1][l3]=Fz[l1][l3];
                f[te][l1][l3]=0.;
                f[ee][l1][l3]=F[l1][l3];
                f[tb][l1][l3]=0.;
                f[eb][l1][l3]=F[l1][l3];
            }
        }
    }
    else std::cout << "I don't know what to do, yet" << std::endl;
}

// Single-spectrum variant of computef_noise. Note the numeric comparisons
// spec>=1 / spec<=1 — presumably te==1 so this mirrors computef's
// spec>=te / spec<=te split; confirm against the estimator-type enum.
void computef_noise(int spec, std::vector< std::vector<double> >& f, size_t L, size_t lminCMB, size_t lmaxCMB){
    std::vector< std::vector<double> > F,Fz;
    if (spec>=1) {
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_noise(F, L, 2, lmaxCMB+1);
    }
    if (spec<=1) {
        Fz=std::vector< std::vector<double>
>(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_noise(Fz, L, 0, lmaxCMB+1);
    }
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
            if (spec==tt) f[l1][l3]=Fz[l1][l3];
            else if (spec==te) f[l1][l3]=0.;
            else if (spec==ee) f[l1][l3]=F[l1][l3];
            else if (spec==tb) f[l1][l3]=0.;
            else if (spec==eb) f[l1][l3]=F[l1][l3];
        }
    }
}

// Mask counterpart of computef: same parity structure and spectrum weighting
// as computef, but with the geometric factors from compF_2_mask.
void computef_mask(std::vector< std::vector< std::vector<double> > >& f, size_t L, PowSpec& wcl, size_t lminCMB, size_t lmaxCMB, int num_spec){
    std::vector< std::vector<double> > F;
    if (num_spec==5) {
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_mask(F, L, 2, lmaxCMB+1);
    }
    std::vector< std::vector<double> > Fz(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    compF_2_mask(Fz, L, 0, lmaxCMB+1);
    if (num_spec==1) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                if ((l1+L+l3)%2==0) {
                    f[tt][l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                }
                else {
                    f[tt][l1][l3]=0.0;
                }
            }
        }
    }
    else if (num_spec==5) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                if ((l1+L+l3)%2==0) {
                    f[tt][l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                    f[te][l1][l3]=wcl.tg(l1)*F[l3][l1]+wcl.tg(l3)*Fz[l1][l3];
                    f[ee][l1][l3]=wcl.gg(l1)*F[l3][l1]+wcl.gg(l3)*F[l1][l3];
                    f[tb][l1][l3]=0.0;
                    f[eb][l1][l3]=0.0;
                }
                else {
                    f[tt][l1][l3]=0.0;
                    f[te][l1][l3]=0.0;
                    f[ee][l1][l3]=0.0;
                    f[tb][l1][l3]=-wcl.tg(l1)*F[l3][l1];
                    f[eb][l1][l3]=-wcl.gg(l1)*F[l3][l1]-wcl.cc(l3)*F[l1][l3];
                }
            }
        }
    }
    else std::cout << "I don't know what to do, yet" << std::endl;
}

// Single-spectrum variant of computef_mask (see computef_noise for the
// meaning of the numeric spec>=1 / spec<=1 split).
void computef_mask(int spec, std::vector< std::vector<double> > & f, size_t L, PowSpec& wcl, size_t lminCMB, size_t lmaxCMB){
    std::vector< std::vector<double> > F,Fz;
    if (spec>=1) {
        F=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_mask(F, L, 2, lmaxCMB+1);
    }
    if (spec<=1) {
        Fz=std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
        compF_2_mask(Fz, L, 0, lmaxCMB+1);
    }
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
            if ((l1+L+l3)%2==0) {
                if (spec==tt) f[l1][l3]=wcl.tt(l1)*Fz[l3][l1]+wcl.tt(l3)*Fz[l1][l3];
                else if (spec==te) f[l1][l3]=wcl.tg(l1)*F[l3][l1]+wcl.tg(l3)*Fz[l1][l3];
                else if (spec==ee) f[l1][l3]=wcl.gg(l1)*F[l3][l1]+wcl.gg(l3)*F[l1][l3];
                else if (spec==tb) f[l1][l3]=0.0;
                else if (spec==eb) f[l1][l3]=0.0;
            }
            else {
                if (spec==tt) f[l1][l3]=0.0;
                else if (spec==te) f[l1][l3]=0.0;
                else if (spec==ee) f[l1][l3]=0.0;
                else if (spec==tb) f[l1][l3]=-wcl.tg(l1)*F[l3][l1];
                else if (spec==eb) f[l1][l3]=-wcl.gg(l1)*F[l3][l1]-wcl.cc(l3)*F[l1][l3];
            }
        }
    }
}

// Return the (lmaxCMB+1)^2 normalized kernel f^2/(C_l1 C_l3)/(2L+1) for one
// estimator type at fixed L. wcl: weight spectra, dcl: total (divisor)
// spectra. Entries below lminCMB stay zero; L<2 yields an all-zero table.
// NOTE(review): computef is called with a hard-coded lminCMB of 2 while the
// inverse-spectrum weights start at the lminCMB parameter — confirm this
// asymmetry is intended.
std::vector< std::vector<double> > computeKernel(std::string stype, PowSpec& wcl, PowSpec& dcl, size_t lminCMB, size_t L) {
    size_t lmaxCMB=dcl.Lmax();
    int type = string2esttype(stype);
    std::vector< std::vector<double> > f(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    std::vector<double> invlcltt(lmaxCMB+1,0.0), invlclee(lmaxCMB+1,0.0), invlclbb(lmaxCMB+1,0.0);
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        invlcltt[l1]=1./dcl.tt(l1);
        invlclee[l1]=1./dcl.gg(l1);
        invlclbb[l1]=1./dcl.cc(l1);
    }
    std::vector< std::vector<double> > out(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    computef(type,f,L,wcl,2,lmaxCMB);
    if (L>=2) {
        #pragma omp parallel for
        for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
            for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                if (type==tt) out[l1][l3]=f[l1][l3]*f[l1][l3]*invlcltt[l1]*invlcltt[l3]*.5/(2.0*L+1.0);
                else if (type==te) out[l1][l3]=f[l1][l3]*f[l1][l3]*invlcltt[l1]*invlclee[l3]/(2.0*L+1.0);
                else if (type==ee) out[l1][l3]=f[l1][l3]*f[l1][l3]*invlclee[l1]*invlclee[l3]*.5/(2.0*L+1.0);
                else if (type==tb) out[l1][l3]=f[l1][l3]*f[l1][l3]*invlcltt[l1]*invlclbb[l3]/(2.0*L+1.0);
                else if (type==eb)
out[l1][l3]=f[l1][l3]*f[l1][l3]*invlclee[l1]*invlclbb[l3]/(2.0*L+1.0);
            }
        }
    }
    return out;
}

// Compute the quadratic-estimator normalization A_L for L in [lmin,lmax]
// and store it in al (tt, and for polarization also tg/gg/tc/gc).
// A_L = (2L+1) / sum_{l1,l3} f^2/(C_l1 C_l3); zero sums map to A_L = 0.
// NOTE(review): num_spec is left uninitialized if wcl.Num_specs() is
// neither 1 nor 4 — confirm callers guarantee one of those two values.
void makeA(PowSpec& wcl, PowSpec& dcl, PowSpec& al, size_t lmin, size_t lmax, size_t lminCMB) {
    size_t lmaxCMB=dcl.Lmax();
    int num_spec;
    if (wcl.Num_specs()==1) {num_spec=1; assert(al.Num_specs()>=1);}
    if (wcl.Num_specs()==4) {num_spec=5; assert(al.Num_specs()>=6);}
    std::vector< std::vector< std::vector<double> > > f(num_spec, std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0)));
    std::vector<double> invlcltt(lmaxCMB+1,0.0), invlclee(lmaxCMB+1,0.0), invlclbb(lmaxCMB+1,0.0);
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        invlcltt[l1]=1./dcl.tt(l1);
        if (num_spec==5) {
            invlclee[l1]=1./dcl.gg(l1);
            invlclbb[l1]=1./dcl.cc(l1);
        }
    }
    double att, ate, aee, atb, aeb;
    for (size_t L=lmin;L<lmax+1;L++) {
        // std::cout << " Computing amplitude ... " << (int)(L*100./lmax) << " %\r"; std::cout.flush();
        computef(f,L,wcl,lminCMB,lmaxCMB,num_spec);
        att=0.;
        ate=0.;
        aee=0.;
        atb=0.;
        aeb=0.;
        if (L>=lmin) {  // always true inside this loop; kept for symmetry
            #pragma omp parallel for reduction(+:att, ate, aee, atb, aeb) schedule(dynamic, 25)
            for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
                for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                    att+=f[tt][l1][l3]*f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*.5;
                    if (num_spec==5) {
                        ate+=f[te][l1][l3]*f[te][l1][l3]*invlcltt[l1]*invlclee[l3];
                        aee+=f[ee][l1][l3]*f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*.5;
                        atb+=f[tb][l1][l3]*f[tb][l1][l3]*invlcltt[l1]*invlclbb[l3];
                        aeb+=f[eb][l1][l3]*f[eb][l1][l3]*invlclee[l1]*invlclbb[l3];
                    }
                }
            }
        }
        al.tt(L) = (att!=0.) ? (2.0*L+1.0)/att : 0.0;
        if (num_spec==5) {
            al.tg(L) = (ate!=0.) ? (2.0*L+1.0)/ate : 0.0;
            al.gg(L) = (aee!=0.) ? (2.0*L+1.0)/aee : 0.0;
            al.tc(L) = (atb!=0.) ? (2.0*L+1.0)/atb : 0.0;
            al.gc(L) = (aeb!=0.) ? (2.0*L+1.0)/aeb : 0.0;
        }
        // allow Ctrl-C from the Python side to abort the long loop
        if(PyErr_CheckSignals() == -1) { throw invalid_argument( "Keyboard interrupt" ); }
    }
}

// Compute both the normalizations al (as makeA) and the 9 Gaussian-noise
// cross-biases N^{XY,WZ}_L between estimator pairs, returned as
// bias[tttt..ebeb][L]. rdcls supplies the realization/data spectra entering
// the covariance; the sgn(L+l1+l3) term is the permuted (l1<->l3) coupling.
std::vector< std::vector<double> > makeAN(PowSpec& wcl, PowSpec& dcl, PowSpec& rdcls, PowSpec& al, size_t lmin, size_t lmax, size_t lminCMB1, size_t lminCMB2, size_t lmaxCMB1, size_t lmaxCMB2) {
    int num_spec=5;
    assert(wcl.Num_specs()==4);
    size_t lmaxCMB=max(lmaxCMB1,lmaxCMB2);
    size_t lminCMB=min(lminCMB1,lminCMB2);
    std::vector< std::vector<double> > bias(9, std::vector<double>(lmax+1,0.0));
    std::vector< std::vector< std::vector<double> > > f(num_spec, std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0)));
    std::vector<double> invlcltt(lmaxCMB+1,0.0), invlclee(lmaxCMB+1,0.0), invlclbb(lmaxCMB+1,0.0);
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        invlcltt[l1]=1./dcl.tt(l1);
        invlclee[l1]=1./dcl.gg(l1);
        invlclbb[l1]=1./dcl.cc(l1);
    }
    double ntttt, nttte, nttee, ntete, nteee, neeee, ntbtb, ntbeb, nebeb, att, ate, aee, atb, aeb;
    for (size_t L=lmin;L<lmax+1;L++) {
        computef(f,L,wcl,lminCMB,lmaxCMB,num_spec);
        ntttt=0.;
        nttte=0.;
        nttee=0.;
        ntete=0.;
        nteee=0.;
        neeee=0.;
        ntbtb=0.;
        ntbeb=0.;
        nebeb=0.;
        att=0.;
        ate=0.;
        aee=0.;
        atb=0.;
        aeb=0.;
        if (L>=lmin) {
            #pragma omp parallel for reduction(+:att, ate, aee, atb, aeb, ntttt, nttte, nttee, ntete, nteee, neeee, ntbtb, ntbeb, nebeb) schedule(dynamic, 25)
            for (size_t l1=lminCMB1;l1<lmaxCMB1+1;l1++) {
                for (size_t l3=lminCMB2;l3<lmaxCMB2+1;l3++) {
                    att+=f[tt][l1][l3]*f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*.5;
                    ate+=f[te][l1][l3]*f[te][l1][l3]*invlcltt[l1]*invlclee[l3];
                    aee+=f[ee][l1][l3]*f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*.5;
                    atb+=f[tb][l1][l3]*f[tb][l1][l3]*invlcltt[l1]*invlclbb[l3];
                    aeb+=f[eb][l1][l3]*f[eb][l1][l3]*invlclee[l1]*invlclbb[l3];
                    ntttt+=f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*(f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*rdcls.tt(l1)*rdcls.tt(l3)+sgn(L+l1+l3)*f[tt][l3][l1]*invlcltt[l3]*invlcltt[l1]*rdcls.tt(l1)*rdcls.tt(l3))*.25;
nttte+=f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*(f[te][l1][l3]*invlcltt[l1]*invlclee[l3]*rdcls.tt(l1)*rdcls.tg(l3)+sgn(L+l1+l3)*f[te][l3][l1]*invlcltt[l3]*invlclee[l1]*rdcls.tg(l1)*rdcls.tt(l3))*.5;
                    nttee+=f[tt][l1][l3]*invlcltt[l1]*invlcltt[l3]*(f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*rdcls.tg(l1)*rdcls.tg(l3)+sgn(L+l1+l3)*f[ee][l3][l1]*invlclee[l3]*invlclee[l1]*rdcls.tg(l1)*rdcls.tg(l3))*.25;
                    ntete+=f[te][l1][l3]*invlcltt[l1]*invlclee[l3]*(f[te][l1][l3]*invlcltt[l1]*invlclee[l3]*rdcls.tt(l1)*rdcls.gg(l3)+sgn(L+l1+l3)*f[te][l3][l1]*invlcltt[l3]*invlclee[l1]*rdcls.tg(l1)*rdcls.tg(l3));
                    nteee+=f[te][l1][l3]*invlcltt[l1]*invlclee[l3]*(f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*rdcls.tg(l1)*rdcls.gg(l3)+sgn(L+l1+l3)*f[ee][l3][l1]*invlclee[l3]*invlclee[l1]*rdcls.tg(l1)*rdcls.gg(l3))*.5;
                    neeee+=f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*(f[ee][l1][l3]*invlclee[l1]*invlclee[l3]*rdcls.gg(l1)*rdcls.gg(l3)+sgn(L+l1+l3)*f[ee][l3][l1]*invlclee[l3]*invlclee[l1]*rdcls.gg(l1)*rdcls.gg(l3))*.25;
                    // B-channel estimators have no permuted term (no l1<->l3 symmetry)
                    ntbtb+=f[tb][l1][l3]*invlcltt[l1]*invlclbb[l3]*(f[tb][l1][l3]*invlcltt[l1]*invlclbb[l3]*rdcls.tt(l1)*rdcls.cc(l3));
                    ntbeb+=f[tb][l1][l3]*invlcltt[l1]*invlclbb[l3]*(f[eb][l1][l3]*invlclee[l1]*invlclbb[l3]*rdcls.tg(l1)*rdcls.cc(l3));
                    nebeb+=f[eb][l1][l3]*invlclee[l1]*invlclbb[l3]*(f[eb][l1][l3]*invlclee[l1]*invlclbb[l3]*rdcls.gg(l1)*rdcls.cc(l3));
                }
            }
        }
        // Normalizations A_L = (2L+1)/sum; zero sums map to zero.
        al.tt(L) = (att!=0.) ? (2.0*L+1.0)/att : 0.0;
        al.tg(L) = (ate!=0.) ? (2.0*L+1.0)/ate : 0.0;
        al.gg(L) = (aee!=0.) ? (2.0*L+1.0)/aee : 0.0;
        al.tc(L) = (atb!=0.) ? (2.0*L+1.0)/atb : 0.0;
        al.gc(L) = (aeb!=0.) ? (2.0*L+1.0)/aeb : 0.0;
        // Cross-biases N = n * A^X_L * A^Y_L / (2L+1); the commented lines
        // mark cross terms that vanish and are intentionally not stored.
        bias[tttt][L]=ntttt*al.tt(L)*al.tt(L)/(2.*L+1.);
        bias[ttte][L]=nttte*al.tt(L)*al.tg(L)/(2.*L+1.);
        bias[ttee][L]=nttee*al.tt(L)*al.gg(L)/(2.*L+1.);
        //=ntttb*att*atb/(2.*L+1.);
        //=ntteb*att*aeb/(2.*L+1.);
        bias[tete][L]=ntete*al.tg(L)*al.tg(L)/(2.*L+1.);
        bias[teee][L]=nteee*al.tg(L)*al.gg(L)/(2.*L+1.);
        //=ntetb*ate*atb/(2.*L+1.);
        //=nteeb*ate*aeb/(2.*L+1.);
        bias[eeee][L]=neeee*al.gg(L)*al.gg(L)/(2.*L+1.);
        //=neetb*aee*atb/(2.*L+1.);
        //=neeeb*aee*aeb/(2.*L+1.);
        bias[tbtb][L]=ntbtb*al.tc(L)*al.tc(L)/(2.*L+1.);
        bias[tbeb][L]=ntbeb*al.tc(L)*al.gc(L)/(2.*L+1.);
        bias[ebeb][L]=nebeb*al.gc(L)*al.gc(L)/(2.*L+1.);
        PyErr_CheckSignals();
    }
    return bias;
}

// Bias-hardening normalization: for one estimator type, compute the
// auto/cross response amplitudes between the gradient (lensing), mask and
// noise weight functions. Returns out[gradgrad..noisnois][L].
// NOTE(review): num_spec is declared but never used here; out rows are sized
// lmaxCMB+1 but indexed by L up to lmax — assumes lmax<=lmaxCMB, confirm.
std::vector< std::vector<double> > makeA_BH(std::string stype, PowSpec& wcl, PowSpec& dcl, size_t lmin, size_t lmax, size_t lminCMB) {
    size_t lmaxCMB=dcl.Lmax();
    int num_spec;
    int spec = string2esttype(stype);
    if (wcl.Num_specs()==1) {assert(spec==0);}
    if (wcl.Num_specs()==4) {assert(spec<=5);}
    std::vector< std::vector< std::vector<double> > > f(NUM_BH_TYPE, std::vector< std::vector<double> >(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0)));
    std::vector< std::vector<double> > out(NUM_BH_TYPE_CROSS, std::vector<double>(lmaxCMB+1,0.0));
    // precompute the combined inverse-spectrum weight for this estimator type
    std::vector< std::vector<double> > invlcl(lmaxCMB+1, std::vector<double>(lmaxCMB+1,0.0));
    #pragma omp parallel for
    for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
        for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
            if (spec==tt) invlcl[l1][l3]=1./dcl.tt(l1)/dcl.tt(l3)*.5;
            else if (spec==te) invlcl[l1][l3]=1./dcl.tt(l1)/dcl.gg(l3);
            else if (spec==ee) invlcl[l1][l3]=1./dcl.gg(l1)/dcl.gg(l3)*.5;
            else if (spec==tb) invlcl[l1][l3]=1./dcl.tt(l1)/dcl.cc(l3);
            else if (spec==eb) invlcl[l1][l3]=1./dcl.gg(l1)/dcl.cc(l3);
        }
    }
    double gg, gm, gn, mm, mn, nn;
    for (size_t L=lmin;L<lmax+1;L++) {
        // std::cout << " Computing amplitude ... " << (int)(L*100./lmax) << " %\r"; std::cout.flush();
        computef(spec,f[grad],L,wcl,lminCMB,lmaxCMB);
        computef_mask(spec,f[mask],L,wcl,lminCMB,lmaxCMB);
        computef_noise(spec,f[nois],L,lminCMB,lmaxCMB);
        gg=0.;
        gm=0.;
        gn=0.;
        mm=0.;
        mn=0.;
        nn=0.;
        if (L>=lmin) {
            #pragma omp parallel for reduction(+:gg,gm,gn,mm,mn,nn) schedule(dynamic, 25)
            for (size_t l1=lminCMB;l1<lmaxCMB+1;l1++) {
                for (size_t l3=lminCMB;l3<lmaxCMB+1;l3++) {
                    gg+=f[grad][l1][l3]*f[grad][l1][l3]*invlcl[l1][l3];
                    gm+=f[grad][l1][l3]*f[mask][l1][l3]*invlcl[l1][l3];
                    gn+=f[grad][l1][l3]*f[nois][l1][l3]*invlcl[l1][l3];
                    mm+=f[mask][l1][l3]*f[mask][l1][l3]*invlcl[l1][l3];
                    mn+=f[mask][l1][l3]*f[nois][l1][l3]*invlcl[l1][l3];
                    nn+=f[nois][l1][l3]*f[nois][l1][l3]*invlcl[l1][l3];
                }
            }
        }
        out[gradgrad][L] = (gg!=0.) ? (2.0*L+1.0)/gg : 0.0;
        out[gradmask][L] = (gm!=0.) ? (2.0*L+1.0)/gm : 0.0;
        out[gradnois][L] = (gn!=0.) ? (2.0*L+1.0)/gn : 0.0;
        out[maskmask][L] = (mm!=0.) ? (2.0*L+1.0)/mm : 0.0;
        out[masknois][L] = (mn!=0.) ? (2.0*L+1.0)/mn : 0.0;
        out[noisnois][L] = (nn!=0.) ?
(2.0*L+1.0)/nn : 0.0;
        if(PyErr_CheckSignals() == -1) { throw invalid_argument( "Keyboard interrupt" ); }
    }
    return out;
}

// Spin-weighted alm -> (Q,U) map synthesis via libsharp with per-ring weights.
void alm2map_libsharp(Alm< xcomplex< double > > & almEin, Alm< xcomplex< double > > & almBin, Healpix_Map<double> & mapQout, Healpix_Map<double> & mapUout, size_t spin, arr<double> &weight) {
    int nside = mapQout.Nside();
    size_t lmax = almEin.Lmax();
    sharp_cxxjob<double> job;
    job.set_weighted_Healpix_geometry (nside, &weight[0]);
    job.set_triangular_alm_info (lmax, lmax);
    job.alm2map_spin(&almEin(0,0),&almBin(0,0),&mapQout[0],&mapUout[0],spin,false);
}

// Synthesize a single spin-|s| alm into its real/imaginary map pair by
// pairing it with a zero B-mode alm.
void alm2map_libsharp_sY(Alm< xcomplex< double > > & almin, Healpix_Map<double> & mapRout, Healpix_Map<double> & mapIout, int spin, arr<double> &weight) {
    int nside = mapRout.Nside();
    size_t lmax = almin.Lmax();
    Alm< xcomplex< double > > almZ(lmax,lmax);
    sharp_cxxjob<double> job;
    job.set_weighted_Healpix_geometry (nside, &weight[0]);
    job.set_triangular_alm_info (lmax, lmax);
    job.alm2map_spin(&almin(0,0),&almZ(0,0),&mapRout[0],&mapIout[0],abs(spin),false);
}

// Spin-weighted (Q,U) map -> (E,B) alm analysis via libsharp.
void map2alm_libsharp(Healpix_Map<double> & mapQin, Healpix_Map<double> & mapUin, Alm< xcomplex< double > > & almEout, Alm< xcomplex< double > > & almBout, size_t spin, arr<double> &weight) {
    int nside = mapQin.Nside();
    int lmax=almEout.Lmax();
    sharp_cxxjob<double> job;
    job.set_weighted_Healpix_geometry (nside, &weight[0]);
    job.set_triangular_alm_info (lmax, lmax);
    job.map2alm_spin(&mapQin[0],&mapUin[0],&almEout(0,0),&almBout(0,0),spin,false);
}

// Analyse a complex spin-|s| field into a single alm; the spin sign selects
// how the auxiliary B alm is recombined (almout = -(E + iB) vs -E + iB).
void map2alm_libsharp_sY(Healpix_Map<double> & mapRin, Healpix_Map<double> & mapIin, Alm< xcomplex< double > > & almout, int spin, arr<double> &weight) {
    int nside = mapRin.Nside();
    int lmax=almout.Lmax();
    Alm< xcomplex< double > > almB(lmax,lmax);
    sharp_cxxjob<double> job;
    job.set_weighted_Healpix_geometry (nside, &weight[0]);
    job.set_triangular_alm_info (lmax, lmax);
    job.map2alm_spin(&mapRin[0],&mapIin[0],&almout(0,0),&almB(0,0),abs(spin),false);
    if (spin>=0) {almB.Scale(complex_i);almout.Add(almB);almout.Scale(-1.);}
    else {almB.Scale(complex_i);almout.Scale(-1.);almout.Add(almB);}
}

// Position-space convolution kernel: multiply the two input fields in map
// space (real and imaginary products separately), re-analyse, and copy the
// result up to almout's lmax. NaN pixels are zeroed before analysis.
// WARNING: alm1in/alm2in are destroyed (resized and reused as scratch).
// NOTE(review): lmaxCMB2 is read from alm1in, not alm2in — looks like a
// copy-paste typo; both variables are unused afterwards, so it is benign
// as written, but confirm before relying on them.
void fast_kernel(Alm< xcomplex< double > > & alm1in, Alm< xcomplex< double > > & alm2in, Alm< xcomplex< double > > & almout, size_t spin, int nside, arr<double> &weight) {
    size_t lmaxCMB1=alm1in.Lmax();
    size_t lmaxCMB2=alm1in.Lmax();
    size_t lmax=almout.Lmax();
    Healpix_Map<double> map1Q, map1U, map2Q, map2U;
    map1Q.SetNside(nside,RING);
    map1U.SetNside(nside,RING);
    map2Q.SetNside(nside,RING);
    map2U.SetNside(nside,RING);
    size_t lmaxtemp=2*nside-1;
    size_t niter=2;
    alm2map_libsharp_sY(alm1in,map1Q,map1U,spin,weight);
    alm2map_libsharp_sY(alm2in,map2Q,map2U,spin,weight);
    Healpix_Map<double> mapR;
    mapR.SetNside(nside,RING);
    #pragma omp parallel for
    for (int i=0; i< map1Q.Npix(); i++) {
        mapR[i]=(map1Q[i]*map2Q[i]+map1U[i]*map2U[i]);
        map1U[i]=(map2Q[i]*map1U[i]-map2U[i]*map1Q[i]);
        if (mapR[i]!=mapR[i]) mapR[i]=0.0;      // x!=x is the NaN test
        if (map1U[i]!=map1U[i]) map1U[i]=0.0;
    }
    alm1in.Set(lmaxtemp,lmaxtemp);
    alm2in.Set(lmaxtemp,lmaxtemp);
    map2alm_iter(mapR, alm1in,niter,weight);
    map2alm_iter(map1U,alm2in,niter,weight);
    alm2in.Scale(complex_i);
    alm1in.Add(alm2in);
    for (size_t m=0; m<=lmax; ++m) {
        for (size_t l=m; l<=lmax; ++l) {
            almout(l,m)=alm1in(l,m);
        }
    }
}

// B-template variant of fast_kernel: combine a scalar field (almP -> map)
// with a spin-2 field (almE -> Q/U maps) in pixel space, then analyse the
// product back with a single spin-2 transform. alm1in/alm2in are destroyed.
// NOTE(review): mapI is declared but never allocated or used; lmaxP/lmaxE
// are unused — candidates for cleanup.
void fast_kernel_bl(Alm< xcomplex< double > > & alm1in, Alm< xcomplex< double > > & alm2in, Alm< xcomplex< double > > & almout, int nside, arr<double> &weight) {
    size_t lmaxP=alm1in.Lmax();
    size_t lmaxE=alm2in.Lmax();
    size_t lmax=almout.Lmax();
    Healpix_Map<double> map1Q, map2Q, map2U;
    map1Q.SetNside(nside,RING);
    map2Q.SetNside(nside,RING);
    map2U.SetNside(nside,RING);
    size_t lmaxtemp=lmax;//2*nside-1;
    alm2map(alm1in,map1Q);
    alm2map_libsharp_sY(alm2in,map2Q,map2U,2,weight);
    Healpix_Map<double> mapR, mapI;
    mapR.SetNside(nside,RING);
    #pragma omp parallel for
    for (int i=0; i< map1Q.Npix(); i++) {
        mapR[i]=-map1Q[i]*map2Q[i];
        map2Q[i]=-map2U[i]*map1Q[i];
        if (mapR[i]!=mapR[i]) mapR[i]=0.0;
        if (map2Q[i]!=map2Q[i]) map2Q[i]=0.0;
    }
alm1in.Set(lmaxtemp,lmaxtemp);
    alm2in.Set(lmaxtemp,lmaxtemp);
    sharp_cxxjob<double> job;
    job.set_weighted_Healpix_geometry (nside, &weight[0]);
    job.set_triangular_alm_info (lmaxtemp, lmaxtemp);
    job.map2alm_spin(&mapR[0],&map2Q[0],&alm1in(0,0),&alm2in(0,0),2,false);
    alm1in.Scale(-1.*complex_i);
    alm1in.Add(alm2in);
    for (size_t m=0; m<=lmax; ++m) {
        for (size_t l=m; l<=lmax; ++l) {
            almout(l,m)=alm1in(l,m);
        }
    }
    if(PyErr_CheckSignals() == -1) { throw invalid_argument( "Keyboard interrupt" ); }
}

// Accumulate one of the six separable terms of the lensing B-mode template
// into almB. termnum selects the (weightE, weightP) multipole weights and
// the final l(l+1)/sgn(l) rescalings of the convolved result.
// NOTE(review): the two fill loops below appear to have their upper bounds
// swapped relative to the alloc sizes — lsqr is alloc'd max(lmaxE,lmaxP)+1
// but filled to lmaxB, while sgnL is alloc'd lmaxB+1 but filled to
// max(lmaxE,lmaxP).  Out-of-bounds writes whenever lmaxB differs from
// max(lmaxE,lmaxP); confirm and fix.  'spin' is set but never used.
void compute_term_bl(size_t termnum, Alm< xcomplex< double > > & almB, Alm< xcomplex< double > > & almE, Alm< xcomplex< double > > & almP, size_t lminE, size_t lminP, int nside, arr<double> &weight) {
    size_t lmaxB=almB.Lmax();
    size_t lmaxE=almE.Lmax();
    size_t lmaxP=almP.Lmax();
    Alm< xcomplex< double > > almE_loc(lmaxE,lmaxE), almP_loc(lmaxP,lmaxP);
    arr<double> weightE, weightP, lsqr, sgnL;
    weightE.alloc(lmaxE+1);
    weightP.alloc(lmaxP+1);
    weightE.fill(.0);
    weightP.fill(.0);
    size_t spin=0;
    lsqr.alloc(max(lmaxE,lmaxP)+1);
    sgnL.alloc(lmaxB+1);
    #pragma omp parallel for
    for (size_t l=0; l<lmaxB+1; l++) {
        lsqr[l]=l*(l+1.);
    }
    #pragma omp parallel for
    for (size_t l=0; l<max(lmaxE,lmaxP)+1; l++) {
        sgnL[l]=sgn(l);
    }
    // Per-term E-side weights (commented factors show the spectrum weighting
    // applied elsewhere); x!=x zeroes NaNs.
    #pragma omp parallel for
    for (size_t l=lminE; l<lmaxE+1; l++) {
        if (termnum==1) weightE[l]=1.;//wcl.gg(l)/dcl.gg(l)
        else if (termnum==2) weightE[l]=-1.*(l*(l+1.));//wcl.gg(l)/dcl.gg(l);
        else if (termnum==3) weightE[l]=-1.;//wcl.gg(l)/dcl.gg(l);
        else if (termnum==4) weightE[l]=-1.*sgn(l);//wcl.gg(l)/dcl.gg(l);
        else if (termnum==5) weightE[l]=1.*sgn(l)*(l*(l+1.));//wcl.gg(l)/dcl.gg(l);
        else if (termnum==6) weightE[l]=1.*sgn(l);//wcl.gg(l)/dcl.gg(l);
        if (weightE[l]!=weightE[l]) weightE[l]=.0;
    }
    // Per-term phi-side weights.
    #pragma omp parallel for
    for (size_t l=lminP; l<lmaxP+1; l++) {
        if (termnum==1) weightP[l]=1.;//wcl.tt(l)/dcl.tt(l);
        else if (termnum==2) weightP[l]=1.;//wcl.tt(l)/dcl.tt(l);
        else if (termnum==3) weightP[l]=1.*(l*(l+1.));//wcl.tt(l)/dcl.tt(l);
        else if (termnum==4) weightP[l]=1.*sgn(l);//wcl.tt(l)/dcl.tt(l);
        else if (termnum==5) weightP[l]=1.*sgn(l);//wcl.tt(l)/dcl.tt(l);
        else if (termnum==6) weightP[l]=1.*(l*(l+1.))*sgn(l);//wcl.tt(l)/dcl.tt(l);
        if (weightP[l]!=weightP[l]) weightP[l]=.0;
    }
    #pragma omp parallel for
    for (size_t m=0; m<=lmaxE; ++m) {
        for (size_t l=m; l<=lmaxE; ++l) {
            almE_loc(l,m)=weightE[l]*almE(l,m);
        }
    }
    #pragma omp parallel for
    for (size_t m=0; m<=lmaxP; ++m) {
        for (size_t l=m; l<=lmaxP; ++l) {
            almP_loc(l,m)=weightP[l]*almP(l,m);
        }
    }
    Alm< xcomplex< double > > almout(lmaxB,lmaxB);
    fast_kernel_bl(almP_loc,almE_loc,almout,nside,weight);
    if (termnum==4 || termnum==5 || termnum==6) {almout.ScaleL(sgnL);}
    if (termnum==1 || termnum==4) almout.ScaleL(lsqr);
    almB.Add(almout);
}

// Build the lensing B-mode template: almB = 1/4 * sum of the six
// compute_term_bl terms, zeroed below lminB, with m=0 modes forced real.
void btemp(Alm< xcomplex< double > > &almB, Alm< xcomplex< double > > &almE, Alm< xcomplex< double > > &almP, int lminB, int lminE, int lminP, int nside) {
    size_t lmaxB=almB.Lmax();
    almB.SetToZero();
    arr<double> weight;
    weight.alloc(2*nside);  // one ring weight per HEALPix ring (2*nside), all set to 1
    weight.fill(1.0);
    size_t nterms=6;
    for (size_t i=1; i<=nterms; i++) {
        compute_term_bl(i, almB, almE, almP, lminE, lminP, nside, weight);
    }
    for (size_t m=0; m<=lmaxB; ++m) {
        for (size_t l=m; l<=lmaxB; ++l) {
            almB(l,m)*=.25;
            if (l<lminB) almB(l,m)=0.;
            if (m==0) almB(l,m)=almB(l,m).real();
        }
    }
}

// Accumulate one term of the realization-dependent noise estimate into almN:
// inverse-variance filter both inputs per spectrum type, convolve them with
// fast_kernel, and apply the type's combinatorial factor.
// NOTE(review): the pragmas below read "#pragma omp parallel" with the for
// loop as the parallel region's body — every thread then executes the whole
// loop redundantly (writes are identical, so the result is unchanged, but
// this is almost certainly a missing "for" clause; compare the
// "#pragma omp parallel for" used elsewhere in this file).
void compute_term_noise(int type, size_t termnum, Alm< xcomplex< double > > & alm1in, Alm< xcomplex< double > > & alm2in, Alm< xcomplex< double > > & almN, PowSpec& wcl, PowSpec& dcl, size_t lmin, size_t lminCMB, int nside, arr<double> &weight) {
    size_t lmaxCMB=alm1in.Lmax();
    size_t lmax=almN.Lmax();
    Alm< xcomplex< double > > alm1(lmaxCMB,lmaxCMB), alm2(lmaxCMB,lmaxCMB);
    arr<double> weight1, weight2, lsqr, sgnL;
    weight1.alloc(lmaxCMB+1);
    weight2.alloc(lmaxCMB+1);
    weight1.fill(.0);
    weight2.fill(.0);
    size_t spin=0;
    lsqr.alloc(lmax+1);
    sgnL.alloc(lmax+1);
    #pragma omp parallel
    for (size_t l=0; l<lmax+1; l++) {
        lsqr[l]=l*(l+1.);
        sgnL[l]=sgn(l);
    }
    if (type==tt) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1;
l++) {
            weight1[l]=1./dcl.tt(l);
            weight2[l]=1./dcl.tt(l);
        }
        spin=0;
    }
    else if (type==te) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1; l++) {
            weight1[l]=1./dcl.tt(l);
            weight2[l]=1./dcl.gg(l);
        }
        spin=2;
    }
    else if (type==ee) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1; l++) {
            weight1[l]=1./dcl.gg(l);
            weight2[l]=1./dcl.gg(l);
        }
        spin=2;
    }
    else if (type==tb) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1; l++) {
            weight1[l]=1./dcl.tt(l);
            weight2[l]=1./dcl.cc(l);
        }
        spin=2;
    }
    else if (type==eb) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1; l++) {
            weight1[l]=1./dcl.gg(l);
            weight2[l]=1./dcl.cc(l);
        }
        spin=2;
    }
    else if (type==bb) {
        #pragma omp parallel
        for (size_t l=lminCMB; l<lmaxCMB+1; l++) {
            weight1[l]=1./dcl.cc(l);
            weight2[l]=1./dcl.cc(l);
        }
        spin=2;
    }
    // inverse-variance filter the inputs, convolve, then apply the
    // symmetry factor (.5 for auto-spectra tt/ee/bb, 1 for cross-spectra)
    alm1=alm1in;
    alm1.ScaleL(weight1);
    alm2=alm2in;
    alm2.ScaleL(weight2);
    Alm< xcomplex< double > > almout(lmax,lmax);
    fast_kernel(alm1,alm2,almout,spin,nside,weight);
    if (type==tt) almout.Scale(.5);
    else if (type==te) almout.Scale(1.);
    else if (type==ee) almout.Scale(.5);
    else if (type==tb) almout.Scale(1.);
    else if (type==eb) almout.Scale(1.);
    else if (type==bb) almout.Scale(.5);
    almN.Add(almout);
    if(PyErr_CheckSignals() == -1) { throw invalid_argument( "Keyboard interrupt" ); }
}

// Driver for compute_term_noise: reset almN, accumulate the single noise
// term for the requested estimator type, zero multipoles below lmin and
// force m=0 coefficients real.
void est_noise(Alm< xcomplex< double > > &alm1, Alm< xcomplex< double > > &alm2, std::string stype, Alm< xcomplex< double > > &almN, PowSpec& wcl, PowSpec& dcl, int lmin, int lminCMB, int nside) {
    size_t lmax=almN.Lmax();
    almN.SetToZero();
    int type = string2esttype(stype);
    arr<double> weight;
    weight.alloc(2*nside);
    weight.fill(1.0);
    size_t nterms=1;
    for (size_t i=1; i<=nterms; i++) {
        compute_term_noise(type, i, alm1, alm2, almN, wcl, dcl, lmin, lminCMB, nside, weight);
    }
    for (size_t m=0; m<=lmax; ++m) {
        for (size_t l=m; l<=lmax; ++l) {
            if (l<lmin) almN(l,m)=0.;
            if (m==0) almN(l,m)=almN(l,m).real();
        }
    }
}

// Accumulate one term of the mask-estimator response into almM. As in
// compute_term_noise, the "#pragma omp parallel" pragmas below lack a "for"
// clause, so each loop runs redundantly in every thread (identical writes;
// result unchanged but presumably unintended). The per-term weights pair a
// spectrum-weighted leg (weight1) with an inverse-variance leg (weight2);
// sgn(l) alternates signs for the parity-flipped terms, and the x!=x checks
// zero NaNs coming from empty spectrum bins.
void compute_term_mask(int type, size_t termnum, Alm< xcomplex< double > > & alm1in, Alm< xcomplex< double > > & alm2in, Alm< xcomplex< double > > & almM, PowSpec& wcl, PowSpec& dcl, size_t lmin, size_t lminCMB1, size_t lminCMB2, size_t lmaxCMB1, size_t lmaxCMB2, int nside, arr<double> &weight) {
    size_t lmax=almM.Lmax();
    size_t lmaxCMB=max(lmaxCMB1,lmaxCMB2);
    Alm< xcomplex< double > > alm1(lmaxCMB,lmaxCMB), alm2(lmaxCMB,lmaxCMB);
    arr<double> weight1, weight2, lsqr, sgnL;
    weight1.alloc(lmaxCMB1+1);
    weight2.alloc(lmaxCMB2+1);
    weight1.fill(.0);
    weight2.fill(.0);
    size_t spin=0;
    lsqr.alloc(lmax+1);
    sgnL.alloc(lmax+1);
    #pragma omp parallel
    for (size_t l=0; l<lmax+1; l++) {
        lsqr[l]=l*(l+1.);
        sgnL[l]=sgn(l);
    }
    if (type==tt) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.tt(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=sgn(l)/dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.tt(l);
            else if (termnum==2) weight2[l]=wcl.tt(l)*sgn(l)/dcl.tt(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=0;
    }
    else if (type==te) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.tg(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=wcl.tg(l)*sgn(l)/dcl.tt(l);
            else if (termnum==3) weight1[l]=1./dcl.tt(l);
            else if (termnum==4) weight1[l]=sgn(l)/dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.gg(l);
            else if (termnum==2) weight2[l]=sgn(l)/dcl.gg(l);
            else if (termnum==3) weight2[l]=wcl.tg(l)/dcl.gg(l);
            else if (termnum==4) weight2[l]=wcl.tg(l)*sgn(l)/dcl.gg(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        if (termnum<=2) spin=2;
        else spin=0;
    }
    else if (type==ee) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==2) weight1[l]=sgn(l)*wcl.gg(l)/dcl.gg(l);
            else if (termnum==3) weight1[l]=1./dcl.gg(l);
            else if (termnum==4) weight1[l]=sgn(l)/dcl.gg(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.gg(l);
            else if (termnum==2) weight2[l]=sgn(l)/dcl.gg(l);
            else if (termnum==3) weight2[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==4) weight2[l]=sgn(l)*wcl.gg(l)/dcl.gg(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==tb) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.tg(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=sgn(l)*wcl.tg(l)/dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.cc(l);
            else if (termnum==2) weight2[l]=sgn(l)/dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==eb) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.gg(l)/dcl.gg(l);
            else if (termnum==2) weight1[l]=sgn(l)*wcl.gg(l)/dcl.gg(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.cc(l);
            else if (termnum==2) weight2[l]=sgn(l)/dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==bb) {
        #pragma omp parallel
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.cc(l)/dcl.cc(l);
            else if (termnum==2) weight1[l]=sgn(l)*wcl.cc(l)/dcl.cc(l);
            else if (termnum==3) weight1[l]=1./dcl.cc(l);
            else if (termnum==4) weight1[l]=sgn(l)/dcl.cc(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
        #pragma omp parallel
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=1./dcl.cc(l);
            else if (termnum==2) weight2[l]=sgn(l)/dcl.cc(l);
            else if (termnum==3) weight2[l]=wcl.cc(l)/dcl.cc(l);
            else if (termnum==4) weight2[l]=sgn(l)*wcl.cc(l)/dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    #pragma omp parallel
for for (size_t m=0; m<=lmaxCMB1; ++m) { for (size_t l=m; l<=lmaxCMB1; ++l) { alm1(l,m)=weight1[l]*alm1in(l,m); } } #pragma omp parallel for for (size_t m=0; m<=lmaxCMB2; ++m) { for (size_t l=m; l<=lmaxCMB2; ++l) { alm2(l,m)=weight2[l]*alm2in(l,m); } } Alm< xcomplex< double > > almout(lmax,lmax); fast_kernel(alm1,alm2,almout,spin,nside,weight); if (termnum==2 || termnum==4) almout.ScaleL(sgnL); if (type==tt) almout.Scale(1.); else if (type==te) almout.Scale(1.); else if (type==ee) almout.Scale(.5); else if (type==tb) almout.Scale(1.*complex_i); else if (type==eb) almout.Scale(1.*complex_i); else if (type==bb) almout.Scale(.5); almM.Add(almout); if(PyErr_CheckSignals() == -1) { throw invalid_argument( "Keyboard interrupt" ); } } void est_mask(Alm< xcomplex< double > > &alm1, Alm< xcomplex< double > > &alm2, std::string stype, Alm< xcomplex< double > > &almM, PowSpec& wcl, PowSpec& dcl, int lmin, int lminCMB1, int lminCMB2, int lmaxCMB1, int lmaxCMB2, int nside) { size_t lmax=almM.Lmax(); almM.SetToZero(); int type = string2esttype(stype); arr<double> weight; weight.alloc(2*nside); weight.fill(1.0); size_t nterms; if (type==tt || type==tb || type==eb) nterms=2; else nterms=4; for (size_t i=1; i<=nterms; i++) { compute_term_mask(type, i, alm1, alm2, almM, wcl, dcl, lmin, lminCMB1, lminCMB2, lmaxCMB1, lmaxCMB2, nside, weight); } for (size_t m=0; m<=lmax; ++m) { for (size_t l=m; l<=lmax; ++l) { almM(l,m)*=.5; if (l<lmin) almM(l,m)=0.; if (m==0) almM(l,m)=almM(l,m).real(); } } } std::vector<double> lensBB(std::vector<double> &clEE, std::vector<double> &clDD, size_t lmax_out, bool even) { int lmax_EE=clEE.size()-1; int lmax_DD=clDD.size()-1; std::vector<double> out(lmax_out+1, 0.); std::vector< std::vector<double> > F(lmax_DD+1, std::vector<double>(lmax_EE+1,0.)); for (size_t l1=0;l1<lmax_out+1;l1++) { compF(F, l1, 2, lmax_DD+1, lmax_EE+1); double Aout=0.; if (even==true){ if (l1>=2) { #pragma omp parallel for reduction(+:Aout) for (size_t L=2;L<lmax_DD+1;L++) { for 
(size_t l2=2;l2<lmax_EE+1;l2++) {
                    if ((l1+L+l2)%2==0) {
                        Aout+=F[L][l2]*F[L][l2]*clDD[L]*clEE[l2];
                    }
                }
            }
        }
    }
    else {
        /* Odd-parity couplings only. */
        if (l1>=2) {
#pragma omp parallel for reduction(+:Aout)
            for (size_t L=2;L<lmax_DD+1;L++) {
                for (size_t l2=2;l2<lmax_EE+1;l2++) {
                    if ((l1+L+l2)%2!=0) {
                        Aout+=F[L][l2]*F[L][l2]*clDD[L]*clEE[l2];
                    }
                }
            }
        }
    }
    out[l1]=Aout*1./(2.*l1+1.);
    /* Allow Python-level Ctrl-C to abort this long-running loop. */
    if(PyErr_CheckSignals() == -1) {
        throw invalid_argument( "Keyboard interrupt" );
    }
}
return out;
}

/*
 * Computes one term (termnum) of the quadratic lensing estimator of the given
 * type (tt/te/ee/tb/eb/bb) from the two filtered CMB alm inputs, and adds the
 * result to almP.
 *
 * alm1in/alm2in : input CMB alm sets (filtered copies are built internally)
 * almP          : accumulator for the estimator output (its Lmax sets lmax)
 * wcl           : fiducial ("weight") CMB power spectra
 * dcl           : total ("data") CMB power spectra used as inverse variance
 * lmin*/lmax*   : multipole ranges for the output and the two CMB legs
 * weight        : map-space quadrature weights forwarded to fast_kernel
 *
 * NOTE(review): term numbering and the lsqr/sgnL rescalings at the end appear
 * to implement the usual decomposition of the lensing response into
 * l(l+1)-weighted pieces — confirm against the accompanying paper/notes.
 * The self-comparison (w!=w) scrubs NaNs from 0/0 spectrum ratios.
 */
void compute_term(int type, size_t termnum, Alm< xcomplex< double > > & alm1in, Alm< xcomplex< double > > & alm2in, Alm< xcomplex< double > > & almP, PowSpec& wcl, PowSpec& dcl, size_t lmin, size_t lminCMB1, size_t lminCMB2, size_t lmaxCMB1, size_t lmaxCMB2, int nside, arr<double> &weight) {
    size_t lmax=almP.Lmax();
    size_t lmaxCMB=max(lmaxCMB1,lmaxCMB2);
    Alm< xcomplex< double > > alm1(lmaxCMB,lmaxCMB), alm2(lmaxCMB,lmaxCMB);
    arr<double> weight1, weight2, lsqr, sgnL;
    weight1.alloc(lmaxCMB1+1);
    weight2.alloc(lmaxCMB2+1);
    weight1.fill(.0);
    weight2.fill(.0);
    size_t spin=0;
    lsqr.alloc(lmax+1);
    sgnL.alloc(lmax+1);
#pragma omp parallel for
    for (size_t l=0; l<lmax+1; l++) { lsqr[l]=l*(l+1.); sgnL[l]=sgn(l); }
    /* Per-type, per-term filter weights for the two estimator legs. */
    if (type==tt) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.tt(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=wcl.tt(l)/dcl.tt(l);
            else if (termnum==3) weight1[l]=wcl.tt(l)*(l*(l+1.))/dcl.tt(l);
            else if (termnum==4) weight1[l]=-(l*(l+1.))*sgn(l)/dcl.tt(l);
            else if (termnum==5) weight1[l]=1.*sgn(l)/dcl.tt(l);
            else if (termnum==6) weight1[l]=1.*sgn(l)/dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.tt(l);
            else if (termnum==2) weight2[l]=1./dcl.tt(l);
            else if (termnum==3) weight2[l]=1./dcl.tt(l);
            else if (termnum==4) weight2[l]=wcl.tt(l)*sgn(l)/dcl.tt(l);
            else if (termnum==5) weight2[l]=wcl.tt(l)*sgn(l)/dcl.tt(l);
            else if (termnum==6) weight2[l]=wcl.tt(l)*(l*(l+1.))*sgn(l)/dcl.tt(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=0;
    }
    else if (type==te) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.tg(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=wcl.tg(l)/dcl.tt(l);
            else if (termnum==3) weight1[l]=wcl.tg(l)*(l*(l+1.))/dcl.tt(l);
            else if (termnum==4) weight1[l]=-wcl.tg(l)*sgn(l)/dcl.tt(l);
            else if (termnum==5) weight1[l]=wcl.tg(l)*sgn(l)/dcl.tt(l);
            else if (termnum==6) weight1[l]=wcl.tg(l)*sgn(l)*(l*(l+1.))/dcl.tt(l);
            else if (termnum==7) weight1[l]=-(l*(l+1.))/dcl.tt(l);
            else if (termnum==8) weight1[l]=1./dcl.tt(l);
            else if (termnum==9) weight1[l]=1./dcl.tt(l);
            else if (termnum==10) weight1[l]=-(l*(l+1.))*sgn(l)/dcl.tt(l);
            else if (termnum==11) weight1[l]=sgn(l)/dcl.tt(l);
            else if (termnum==12) weight1[l]=sgn(l)*1./dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.gg(l);
            else if (termnum==2) weight2[l]=1./dcl.gg(l);
            else if (termnum==3) weight2[l]=1./dcl.gg(l);
            else if (termnum==4) weight2[l]=(l*(l+1.))*sgn(l)/dcl.gg(l);
            else if (termnum==5) weight2[l]=sgn(l)/dcl.gg(l);
            else if (termnum==6) weight2[l]=1.*sgn(l)/dcl.gg(l);
            else if (termnum==7) weight2[l]=wcl.tg(l)/dcl.gg(l);
            else if (termnum==8) weight2[l]=wcl.tg(l)/dcl.gg(l);
            else if (termnum==9) weight2[l]=(l*(l+1.))*wcl.tg(l)/dcl.gg(l);
            else if (termnum==10) weight2[l]=sgn(l)*wcl.tg(l)/dcl.gg(l);
            else if (termnum==11) weight2[l]=sgn(l)*wcl.tg(l)/dcl.gg(l);
            else if (termnum==12) weight2[l]=(l*(l+1.))*sgn(l)*wcl.tg(l)/dcl.gg(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        /* Terms 1-6 carry the spin-2 (E) leg on side 1; terms 7-12 are spin-0. */
        if (termnum<=6) spin=2;
        else spin=0;
    }
    else if (type==ee) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.gg(l)/dcl.gg(l);
            else if (termnum==2) weight1[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==3) weight1[l]=wcl.gg(l)*(l*(l+1.))/dcl.gg(l);
            else if (termnum==4) weight1[l]=-wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==5) weight1[l]=wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==6) weight1[l]=wcl.gg(l)*(l*(l+1.))*sgn(l)/dcl.gg(l);
            else if (termnum==7) weight1[l]=-(l*(l+1.))/dcl.gg(l);
            else if (termnum==8) weight1[l]=1./dcl.gg(l);
            else if (termnum==9) weight1[l]=1./dcl.gg(l);
            else if (termnum==10) weight1[l]=-sgn(l)*(l*(l+1.))/dcl.gg(l);
            else if (termnum==11) weight1[l]=sgn(l)*1./dcl.gg(l);
            else if (termnum==12) weight1[l]=sgn(l)*1./dcl.gg(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.gg(l);
            else if (termnum==2) weight2[l]=1./dcl.gg(l);
            else if (termnum==3) weight2[l]=1./dcl.gg(l);
            else if (termnum==4) weight2[l]=(l*(l+1.))*sgn(l)/dcl.gg(l);
            else if (termnum==5) weight2[l]=sgn(l)*1./dcl.gg(l);
            else if (termnum==6) weight2[l]=sgn(l)*1./dcl.gg(l);
            else if (termnum==7) weight2[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==8) weight2[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==9) weight2[l]=wcl.gg(l)*(l*(l+1.))/dcl.gg(l);
            else if (termnum==10) weight2[l]=wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==11) weight2[l]=wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==12) weight2[l]=wcl.gg(l)*(l*(l+1.))*sgn(l)/dcl.gg(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==tb) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.tg(l)/dcl.tt(l);
            else if (termnum==2) weight1[l]=-wcl.tg(l)/dcl.tt(l);
            else if (termnum==3) weight1[l]=-wcl.tg(l)*(l*(l+1.))/dcl.tt(l);
            else if (termnum==4) weight1[l]=-wcl.tg(l)*sgn(l)/dcl.tt(l);
            else if (termnum==5) weight1[l]=wcl.tg(l)*sgn(l)/dcl.tt(l);
            else if (termnum==6) weight1[l]=wcl.tg(l)*(l*(l+1.))*sgn(l)/dcl.tt(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.cc(l);
            else if (termnum==2) weight2[l]=1./dcl.cc(l);
            else if (termnum==3) weight2[l]=1./dcl.cc(l);
            else if (termnum==4) weight2[l]=(l*(l+1.))*sgn(l)/dcl.cc(l);
            else if (termnum==5) weight2[l]=sgn(l)*1./dcl.cc(l);
            else if (termnum==6) weight2[l]=sgn(l)*1./dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==eb) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=wcl.gg(l)/dcl.gg(l);
            else if (termnum==2) weight1[l]=-wcl.gg(l)/dcl.gg(l);
            else if (termnum==3) weight1[l]=-wcl.gg(l)*(l*(l+1.))/dcl.gg(l);
            else if (termnum==4) weight1[l]=-wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==5) weight1[l]=wcl.gg(l)*sgn(l)/dcl.gg(l);
            else if (termnum==6) weight1[l]=wcl.gg(l)*(l*(l+1.))*sgn(l)/dcl.gg(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.cc(l);
            else if (termnum==2) weight2[l]=1./dcl.cc(l);
            else if (termnum==3) weight2[l]=1./dcl.cc(l);
            else if (termnum==4) weight2[l]=(l*(l+1.))*sgn(l)/dcl.cc(l);
            else if (termnum==5) weight2[l]=sgn(l)*1./dcl.cc(l);
            else if (termnum==6) weight2[l]=sgn(l)*1./dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    else if (type==bb) {
#pragma omp parallel for
        for (size_t l=lminCMB1; l<lmaxCMB1+1; l++) {
            if (termnum==1) weight1[l]=-wcl.cc(l)/dcl.cc(l);
            else if (termnum==2) weight1[l]=wcl.cc(l)/dcl.cc(l);
            else if (termnum==3) weight1[l]=wcl.cc(l)*(l*(l+1.))/dcl.cc(l);
            else if (termnum==4) weight1[l]=-wcl.cc(l)*sgn(l)/dcl.cc(l);
            else if (termnum==5) weight1[l]=wcl.cc(l)*sgn(l)/dcl.cc(l);
            else if (termnum==6) weight1[l]=wcl.cc(l)*(l*(l+1.))*sgn(l)/dcl.cc(l);
            else if (termnum==7) weight1[l]=-(l*(l+1.))/dcl.cc(l);
            else if (termnum==8) weight1[l]=1./dcl.cc(l);
            else if (termnum==9) weight1[l]=1./dcl.cc(l);
            else if (termnum==10) weight1[l]=-sgn(l)*(l*(l+1.))/dcl.cc(l);
            else if (termnum==11) weight1[l]=sgn(l)*1./dcl.cc(l);
            else if (termnum==12) weight1[l]=sgn(l)*1./dcl.cc(l);
            if (weight1[l]!=weight1[l]) weight1[l]=.0;
        }
#pragma omp parallel for
        for (size_t l=lminCMB2; l<lmaxCMB2+1; l++) {
            if (termnum==1) weight2[l]=(l*(l+1.))/dcl.cc(l);
            else if (termnum==2) weight2[l]=1./dcl.cc(l);
            else if (termnum==3) weight2[l]=1./dcl.cc(l);
            else if (termnum==4) weight2[l]=(l*(l+1.))*sgn(l)/dcl.cc(l);
            else if (termnum==5) weight2[l]=sgn(l)*1./dcl.cc(l);
            else if (termnum==6) weight2[l]=sgn(l)*1./dcl.cc(l);
            else if (termnum==7) weight2[l]=wcl.cc(l)/dcl.cc(l);
            else if (termnum==8) weight2[l]=wcl.cc(l)/dcl.cc(l);
            else if (termnum==9) weight2[l]=wcl.cc(l)*(l*(l+1.))/dcl.cc(l);
            else if (termnum==10) weight2[l]=wcl.cc(l)*sgn(l)/dcl.cc(l);
            else if (termnum==11) weight2[l]=wcl.cc(l)*sgn(l)/dcl.cc(l);
            else if (termnum==12) weight2[l]=wcl.cc(l)*(l*(l+1.))*sgn(l)/dcl.cc(l);
            if (weight2[l]!=weight2[l]) weight2[l]=.0;
        }
        spin=2;
    }
    /* Apply the filter weights to the two input alm sets. */
#pragma omp parallel for
    for (size_t m=0; m<=lmaxCMB1; ++m) {
        for (size_t l=m; l<=lmaxCMB1; ++l) {
            alm1(l,m)=weight1[l]*alm1in(l,m);
        }
    }
#pragma omp parallel for
    for (size_t m=0; m<=lmaxCMB2; ++m) {
        for (size_t l=m; l<=lmaxCMB2; ++l) {
            alm2(l,m)=weight2[l]*alm2in(l,m);
        }
    }

    Alm< xcomplex< double > > almout(lmax,lmax);
    fast_kernel(alm1,alm2,almout,spin,nside,weight);

    /* Output-side rescalings: some terms carry an L(L+1) factor, some (-1)^L. */
    if (termnum==2 || termnum==5 || termnum==8 || termnum==11) almout.ScaleL(lsqr);
    if (termnum==4 || termnum==5 || termnum==6 || termnum==10 || termnum==11 || termnum==12) almout.ScaleL(sgnL);

    /* Type-dependent overall normalisation (and factor i for the parity-odd
     * TB/EB estimators). */
    if (type==tt) almout.Scale(1.);
    else if (type==te) almout.Scale(1.);
    else if (type==ee) almout.Scale(.5);
    else if (type==tb) almout.Scale(1.*complex_i);
    else if (type==eb) almout.Scale(1.*complex_i);
    else if (type==bb) almout.Scale(.5);

    almP.Add(almout);

    /* Allow Python-level Ctrl-C to abort this long-running computation. */
    if(PyErr_CheckSignals() == -1) {
        throw invalid_argument( "Keyboard interrupt" );
    }
}
OpenMP.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <math.h> #define array_size 2000000 //function to perform the final calculation for the pearson correlation double calculate_pearson(double pearson_array[5]); int main(int argc, char* argv[]) { //********DECLARE VARIABLES************// /*******SERIAL VARIABLES******/ double start_serial, end_serial; // timer variables //pointers for arrays double *x; double *y; double *xy; double *xsqr; double *ysqr; int i;//iterations //sum variables double xsum; double ysum; double xysum; double xsqr_sum; double ysqr_sum; xsum = ysum = xysum = xsqr_sum = ysqr_sum = 0; double sums_array_serial[5];//array to hold sums for final calculation double coeff_serial; // pearson coeff from serial code /*****PARALLEL VARIABLES******/ double start_parallel, end_parallel; // timer variables //pointers for arrays double *p_x; double *p_y; double *p_xy; double *p_xsqr; double *p_ysqr; double p_xsum; double p_ysum; double p_xysum; double p_xsqr_sum; double p_ysqr_sum; p_xsum = p_ysum = p_xysum = p_xsqr_sum = p_ysqr_sum = 0; double sums_array_parallel[5];//array to hold sums for final calculations double coeff_parallel; //pearson coeff from parallel code /********************* SERIAL CODE ************************************/ //start timer start_serial = omp_get_wtime(); #pragma omp flush(start_serial) // use flush to make sure command is exectued now e.g. 
make sure we get an accurate time recording //allocate memory for the arrays x = (double *)malloc(array_size * sizeof(double)); y = (double *)malloc(array_size * sizeof(double)); xy = (double *)malloc( array_size * sizeof(double)); xsqr = (double *)malloc( array_size * sizeof(double)); ysqr = (double *)malloc( array_size * sizeof(double)); /* calculate: xsum, ysum, xysum, xsqr_sum, ysqr_sum */ for (i = 0; i < array_size ; i++) { x[i] = sin(i); y[i] = sin(i+2); xy[i] = x[i] * y[i]; xsqr[i] = x[i] * x[i]; ysqr[i] = y[i] * y[i]; xsum += x[i]; ysum += y[i]; xysum += xy[i]; xsqr_sum += xsqr[i]; ysqr_sum += ysqr[i]; } free(x); free(y); free(xy); free(xsqr); free(ysqr); //assign values to the array ready to do the final calculation sums_array_serial[0] = xsum; sums_array_serial[1] = ysum; sums_array_serial[2] = xysum; sums_array_serial[3] = xsqr_sum; sums_array_serial[4] = ysqr_sum; /* calculate pearson*/ coeff_serial = calculate_pearson(sums_array_serial); //end timer end_serial = omp_get_wtime(); #pragma omp flush(end_serial) //* pritn schedule type**// const char* s = getenv("OMP_SCHEDULE"); printf("Schedule type and chunk size: %s\n", s); //* print the result */ printf("Serial - Pearson Correlation Coefficient : %f\n", coeff_serial); //print run time printf("Serial time: %1.2f\n", end_serial-start_serial); /******************* PARALLEL CODE ************************************/ //start timer start_parallel = omp_get_wtime(); #pragma omp flush(start_parallel) //allocate memory for the arrays p_x = (double *)malloc(array_size * sizeof(double)); p_y = (double *)malloc(array_size * sizeof(double)); p_xy = (double *)malloc(array_size * sizeof(double)); p_xsqr = (double *)malloc(array_size * sizeof(double)); p_ysqr = (double *)malloc(array_size * sizeof(double)); //calculate: xsum, ysum, xysum, xsqr_sum, ysqr_sum in parallel #pragma omp parallel for \ reduction(+:p_xsum, p_ysum, p_xysum, p_xsqr_sum, p_ysqr_sum)\ schedule(runtime) for(i = 0 ; i < array_size ; i++){ p_x[i] = 
sin(i); p_y[i] = sin(i+2); p_xy[i] = p_x[i] * p_y[i]; p_xsqr[i] = p_x[i] * p_x[i]; p_ysqr[i] = p_y[i] * p_y[i]; p_xsum += p_x[i]; p_ysum += p_y[i]; p_xysum += p_xy[i]; p_xsqr_sum += p_xsqr[i]; p_ysqr_sum += p_ysqr[i]; } free(p_x); free(p_y); free(p_xy); free(p_xsqr); free(p_ysqr); //assign values to the array ready to do the final calculation sums_array_parallel[0] += p_xsum; sums_array_parallel[1] += p_ysum; sums_array_parallel[2] += p_xysum; sums_array_parallel[3] += p_xsqr_sum; sums_array_parallel[4] += p_ysqr_sum; /* calculate pearson */ coeff_parallel = calculate_pearson(sums_array_parallel); //end timer end_parallel = omp_get_wtime(); #pragma omp flush(end_parallel) //print result printf("Parallel - Pearson Correlation Coefficient: %f\n", coeff_parallel); //print run time printf("Parallel time: %1.2f\n", end_parallel-start_parallel); //print speed up printf("Speed up: %1.2f\n \n", (end_serial-start_serial)/(end_parallel-start_parallel)); return 0; } //this function takes the results from the calcuate_sums function and calculates the pearson coefficient// //see report for the formula used double calculate_pearson(double pearson_array[5]){ double num; //numerator double deno; //denominator //calculate the numerator num = (pearson_array[2] - (pearson_array[0] * pearson_array[1]/array_size)); //calculate the denominator deno = (pearson_array[3] - (pearson_array[0] * pearson_array[0]/array_size)) * (pearson_array[4] - (pearson_array[1] * pearson_array[1]/array_size)); //calculate correlation coefficient return num / sqrt(deno); }
axpy_ompacc_mpi.c
// Experimental test input for directive-guided MPI code generation
// simplest scalar*vector operations
// Testing extensions for MPI processes
// Liao 10/27/2015
// vector = vector + vector * scalar
//
// NOTE(review): the "#pragma omp target device(mpi...)" directives below are
// NOT standard OpenMP — they are the experimental extensions this test input
// exists to exercise.  Do not "fix" them to standard syntax.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <omp.h>

#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now

/* zero out the entire vector */
/* (currently unused in this test, kept as part of the test-input template) */
void zero(REAL *A, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        A[i] = 0.0;
    }
}

/* initialize a vector with random floating point numbers */
void init(REAL *A, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        A[i] = (REAL)drand48();
    }
}

/* relative L1 difference: sum(|A-B|) / sum(|B|), used as the checksum */
REAL check(REAL*A, REAL*B, int n)
{
    int i;
    REAL diffsum =0.0, sum = 0.0;
    for (i = 0; i < n; i++) {
        diffsum += fabs(A[i] - B[i]);
        sum += fabs(B[i]);
    }
    return diffsum/sum;
}

/* CPU reference version: y = y + a*x */
void axpy(REAL* x, REAL* y, long n, REAL a) {
    int i;
#pragma omp parallel for shared(x, y, n, a) private(i)
    for (i = 0; i < n; ++i)
    {
        y[i] += a * x[i];
    }
}

int main(int argc, char *argv[])
{
    int n,i;
    REAL *y_ref, *y_ompacc, *x;
    REAL a = 123.456f;

    /* broadcast/compute n on all MPI processes */
#pragma omp target device(mpi:all) begin
    n = VEC_LEN;
    if (argc >= 2)
        n = atoi(argv[1]);
#pragma omp target device(mpi:all) end

    /* master-only allocation of the host arrays */
#pragma omp target device(mpi:master) begin
    y_ref = (REAL *) malloc(n * sizeof(REAL));
    y_ompacc = (REAL *) malloc(n * sizeof(REAL));
    x = (REAL *) malloc(n * sizeof(REAL));
#pragma omp target device(mpi:master) end

    srand48(1<<12);  /* fixed seed for reproducibility */
    init(x, n);
    init(y_ref, n);
    memcpy(y_ompacc, y_ref, n*sizeof(REAL));

    // test directives for MPI code generation: mpi:all means spawn on all MPI processes
#pragma omp target device(mpi) map(tofrom: y_ompacc[0:n] dist_data(block, duplicate,block)) map(to: x[0:n] dist_data(block),a,n)
#pragma omp parallel for shared(x, y_ompacc, n, a) private(i)
    for (i = 0; i < n; ++i)
        y_ompacc[i] += a * x[i];

    int num_threads;
#pragma omp parallel shared (num_threads)
    {
        if (omp_get_thread_num() == 0)
            num_threads = omp_get_num_threads();
    }

    // serial version (reference result for the checksum below)
    axpy(x, y_ref, n, a);

    REAL checksum = check(y_ref, y_ompacc, n);
    printf("axpy(%d): checksum: %g\n", n, checksum);
    assert (checksum < 1.0e-10);

    free(y_ref);
    free(y_ompacc);
    free(x);
    return 0;
}
GB_unaryop__abs_int64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_int64_fp32
// op(A') function: GB_tran__abs_int64_fp32

// C type:   int64_t
// A type:   float
// cast:     int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z ; GB_CAST_SIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int64_fp32
(
    int64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the included template, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
relic_cp_rsa.c
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (c) 2009 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or modify it under the
 * terms of the version 2.1 (or later) of the GNU Lesser General Public License
 * as published by the Free Software Foundation; or version 2.0 of the Apache
 * License as published by the Apache Software Foundation. See the LICENSE files
 * for more details.
 *
 * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the LICENSE files for more details.
 *
 * You should have received a copy of the GNU Lesser General Public or the
 * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
 * or <https://www.apache.org/licenses/>.
 */

/**
 * @file
 *
 * Implementation of the RSA cryptosystem.
 *
 * @ingroup cp
 */

#include "relic.h"

/*============================================================================*/
/* Private definitions                                                        */
/*============================================================================*/

/**
 * Length of chosen padding scheme.
 */
#if CP_RSAPD == PKCS1
#define RSA_PAD_LEN		(11)
#elif CP_RSAPD == PKCS2
#define RSA_PAD_LEN		(2 * RLC_MD_LEN + 2)
#else
#define RSA_PAD_LEN		(2)
#endif

/**
 * Identifier for encrypted messages.
 */
#define RSA_PUB			(02)

/**
 * Identifier for signed messages.
 */
#define RSA_PRV			(01)

/**
 * Byte used as padding unit.
 */
#define RSA_PAD			(0xFF)

/**
 * Byte used as padding unit in PSS signatures.
 */
#define RSA_PSS			(0xBC)

/**
 * Identifier for encryption.
 */
#define RSA_ENC			1

/**
 * Identifier for decryption.
 */
#define RSA_DEC			2

/**
 * Identifier for signature.
 */
#define RSA_SIG			3

/**
 * Identifier for verification.
 */
#define RSA_VER			4

/**
 * Identifier for second encryption step.
 */
#define RSA_ENC_FIN		5

/**
 * Identifier for second signing step.
 */
#define RSA_SIG_FIN		6

/**
 * Identifier for signature of a precomputed hash.
 */
#define RSA_SIG_HASH	7

/**
 * Identifier for verification of a precomputed hash.
 */
#define RSA_VER_HASH	8

#if CP_RSAPD == BASIC

/**
 * Applies or removes simple encryption padding.
 *
 * @param[out] m		- the buffer to pad.
 * @param[out] p_len	- the number of added pad bytes.
 * @param[in] m_len		- the message length in bytes.
 * @param[in] k_len		- the key length in bytes.
 * @param[in] operation	- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_basic(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t pad = 0;
	int result = RLC_ERR;
	bn_t t;

	RLC_TRY {
		bn_null(t);
		bn_new(t);

		switch (operation) {
			case RSA_ENC:
			case RSA_SIG:
			case RSA_SIG_HASH:
				/* EB = 00 | FF | D. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PAD);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_DEC:
			case RSA_VER:
			case RSA_VER_HASH:
				/* EB = 00 | FF | D. */
				m_len = k_len - 1;
				/* The leading byte must be zero. */
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					*p_len = 1;
					do {
						(*p_len)++;
						m_len--;
						bn_rsh(t, m, 8 * m_len);
						pad = (uint8_t)t->dp[0];
					} while (pad == 0 && m_len > 0);
					if (pad == RSA_PAD) {
						result = RLC_OK;
					}
					/* Remove padding, keeping only the message bytes. */
					bn_mod_2b(m, m, (k_len - *p_len) * 8);
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	return result;
}

#endif

#if CP_RSAPD == PKCS1

/**
 * ASN.1 identifier of the hash function SHA-224.
 */
static const uint8_t sh224_id[] =
	{ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
	  0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c };

/**
 * ASN.1 identifier of the hash function SHA-256.
 */
static const uint8_t sh256_id[] =
	{ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
	  0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 };

/**
 * ASN.1 identifier of the hash function SHA-384.
 */
static const uint8_t sh384_id[] =
	{ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
	  0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 };

/**
 * ASN.1 identifier of the hash function SHA-512.
 */
static const uint8_t sh512_id[] =
	{ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
	  0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 };

/**
 * Returns a pointer to the ASN.1 identifier of a hash function according to the
 * PKCS#1 v1.5 padding standard.
 *
 * @param[in] md			- the hash function.
 * @param[in, out] len		- the length of the identifier.
 * @return The pointer to the hash function identifier.
 */
static uint8_t *hash_id(int md, int *len) {
	switch (md) {
		case SH224:
			*len = sizeof(sh224_id);
			return (uint8_t *)sh224_id;
		case SH256:
			*len = sizeof(sh256_id);
			return (uint8_t *)sh256_id;
		case SH384:
			*len = sizeof(sh384_id);
			return (uint8_t *)sh384_id;
		case SH512:
			*len = sizeof(sh512_id);
			return (uint8_t *)sh512_id;
		default:
			RLC_THROW(ERR_NO_VALID);
			return NULL;
	}
}

/**
 * Applies or removes a PKCS#1 v1.5 encryption padding.
 *
 * @param[out] m		- the buffer to pad.
 * @param[out] p_len	- the number of added pad bytes.
 * @param[in] m_len		- the message length in bytes.
 * @param[in] k_len		- the key length in bytes.
 * @param[in] operation	- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_pkcs1(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t *id, pad = 0;
	int len, result = RLC_ERR;
	bn_t t;

	bn_null(t);

	RLC_TRY {
		bn_new(t);

		switch (operation) {
			case RSA_ENC:
				/* EB = 00 | 02 | PS | 00 | D. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PUB);
				*p_len = k_len - 3 - m_len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					/* PS bytes must be nonzero random bytes. */
					do {
						rand_bytes(&pad, 1);
					} while (pad == 0);
					bn_add_dig(m, m, pad);
				}
				/* Make room for the zero and real message. */
				bn_lsh(m, m, (m_len + 1) * 8);
				result = RLC_OK;
				break;
			case RSA_DEC:
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					*p_len = m_len;
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PUB) {
						/* Skip the nonzero PS bytes up to the 00 separator. */
						do {
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad != 0 && m_len > 0);
						/* Remove padding and trailing zero. */
						*p_len -= (m_len - 1);
						bn_mod_2b(m, m, (k_len - *p_len) * 8);
						result = (m_len > 0 ? RLC_OK : RLC_ERR);
					}
				}
				break;
			case RSA_SIG:
				/* EB = 00 | 01 | PS | 00 | D. */
				id = hash_id(MD_MAP, &len);
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PRV);
				*p_len = k_len - 3 - m_len - len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					bn_add_dig(m, m, RSA_PAD);
				}
				/* Make room for the zero and hash id. */
				bn_lsh(m, m, 8 * (len + 1));
				bn_read_bin(t, id, len);
				bn_add(m, m, t);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_SIG_HASH:
				/* EB = 00 | 01 | PS | 00 | D. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PRV);
				*p_len = k_len - 3 - m_len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					bn_add_dig(m, m, RSA_PAD);
				}
				/* Make room for the zero and hash. */
				bn_lsh(m, m, 8 * (m_len + 1));
				result = RLC_OK;
				break;
			case RSA_VER:
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PRV) {
						/* counter counts the FF padding bytes; at least 8 are
						 * required below, as mandated by PKCS#1 v1.5. */
						int counter = 0;
						do {
							counter++;
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad == RSA_PAD && m_len > 0);
						/* Remove padding and trailing zero. */
						id = hash_id(MD_MAP, &len);
						bn_rsh(t, m, 8 * m_len);
						bn_mod_2b(t, t, 8);
						if (bn_is_zero(t)) {
							m_len -= len;
							bn_rsh(t, m, 8 * m_len);
							/* Compare the hash id byte-by-byte, accumulating
							 * the differences so the comparison does not
							 * early-exit. */
							int r = 0;
							for (int i = 0; i < len; i++) {
								pad = (uint8_t)t->dp[0];
								r |= pad ^ id[len - i - 1];
								bn_rsh(t, t, 8);
							}
							*p_len = k_len - m_len;
							bn_mod_2b(m, m, m_len * 8);
							if (r == 0 && m_len == RLC_MD_LEN && counter >= 8) {
								result = RLC_OK;
							}
						}
					}
				}
				break;
			case RSA_VER_HASH:
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PRV) {
						int counter = 0;
						do {
							counter++;
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad == RSA_PAD && m_len > 0);
						/* Remove padding and trailing zero. */
						*p_len = k_len - m_len;
						bn_rsh(t, m, 8 * m_len);
						bn_mod_2b(t, t, 8);
						if (bn_is_zero(t)) {
							bn_mod_2b(m, m, m_len * 8);
							if (m_len == RLC_MD_LEN && counter >= 8) {
								result = RLC_OK;
							}
						}
					}
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	return result;
}

#endif

#if CP_RSAPD == PKCS2

/**
 * Applies or removes a PKCS#1 v2.1 encryption padding.
 *
 * @param[out] m		- the buffer to pad.
 * @param[out] p_len	- the number of added pad bytes.
 * @param[in] m_len		- the message length in bytes.
 * @param[in] k_len		- the key length in bytes.
 * @param[in] operation	- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_pkcs2(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t pad, h1[RLC_MD_LEN], h2[RLC_MD_LEN];
	/* MSVC does not allow dynamic stack arrays */
	uint8_t *mask = RLC_ALLOCA(uint8_t, k_len);
	int result = RLC_ERR;
	bn_t t;

	bn_null(t);

	RLC_TRY {
		bn_new(t);

		switch (operation) {
			case RSA_ENC:
				/* DB = lHash | PS | 01 | D. */
				md_map(h1, NULL, 0);
				bn_read_bin(m, h1, RLC_MD_LEN);
				*p_len = k_len - 2 * RLC_MD_LEN - 2 - m_len;
				bn_lsh(m, m, *p_len * 8);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0x01);
				/* Make room for the real message.
*/ bn_lsh(m, m, m_len * 8); result = RLC_OK; break; case RSA_ENC_FIN: /* EB = 00 | maskedSeed | maskedDB. */ rand_bytes(h1, RLC_MD_LEN); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } bn_write_bin(mask, k_len - RLC_MD_LEN - 1, m); md_mgf(h2, RLC_MD_LEN, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < RLC_MD_LEN; i++) { h1[i] ^= h2[i]; } bn_read_bin(t, h1, RLC_MD_LEN); bn_lsh(t, t, 8 * (k_len - RLC_MD_LEN - 1)); bn_add(t, t, m); bn_copy(m, t); result = RLC_OK; break; case RSA_DEC: m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (bn_is_zero(t)) { m_len -= RLC_MD_LEN; bn_rsh(t, m, 8 * m_len); bn_write_bin(h1, RLC_MD_LEN, t); bn_mod_2b(m, m, 8 * m_len); bn_write_bin(mask, m_len, m); md_mgf(h2, RLC_MD_LEN, mask, m_len); for (int i = 0; i < RLC_MD_LEN; i++) { h1[i] ^= h2[i]; } md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } m_len -= RLC_MD_LEN; bn_rsh(t, m, 8 * m_len); bn_write_bin(h2, RLC_MD_LEN, t); md_map(h1, NULL, 0); pad = 0; for (int i = 0; i < RLC_MD_LEN; i++) { pad |= h1[i] ^ h2[i]; } bn_mod_2b(m, m, 8 * m_len); *p_len = bn_size_bin(m); (*p_len)--; bn_rsh(t, m, *p_len * 8); if (pad == 0 && bn_cmp_dig(t, 1) == RLC_EQ) { result = RLC_OK; } bn_mod_2b(m, m, *p_len * 8); *p_len = k_len - *p_len; } break; case RSA_SIG: case RSA_SIG_HASH: /* M' = 00 00 00 00 00 00 00 00 | H(M). */ bn_zero(m); bn_lsh(m, m, 64); /* Make room for the real message. */ bn_lsh(m, m, RLC_MD_LEN * 8); result = RLC_OK; break; case RSA_SIG_FIN: memset(mask, 0, 8); bn_write_bin(mask + 8, RLC_MD_LEN, m); md_map(h1, mask, RLC_MD_LEN + 8); bn_read_bin(m, h1, RLC_MD_LEN); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); t->dp[0] ^= 0x01; /* m_len is now the size in bits of the modulus. 
*/ bn_lsh(t, t, 8 * RLC_MD_LEN); bn_add(m, t, m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PSS); for (int i = m_len - 1; i < 8 * k_len; i++) { bn_set_bit(m, i, 0); } result = RLC_OK; break; case RSA_VER: case RSA_VER_HASH: bn_mod_2b(t, m, 8); pad = (uint8_t)t->dp[0]; if (pad == RSA_PSS) { int r = 1; for (int i = m_len; i < 8 * k_len; i++) { if (bn_get_bit(m, i) != 0) { r = 0; } } bn_rsh(m, m, 8); bn_mod_2b(t, m, 8 * RLC_MD_LEN); bn_write_bin(h2, RLC_MD_LEN, t); bn_rsh(m, m, 8 * RLC_MD_LEN); bn_write_bin(h1, RLC_MD_LEN, t); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } m->dp[0] ^= 0x01; for (int i = m_len - 1; i < 8 * k_len; i++) { bn_set_bit(m, i - ((RLC_MD_LEN + 1) * 8), 0); } if (r == 1 && bn_is_zero(m)) { result = RLC_OK; } bn_read_bin(m, h2, RLC_MD_LEN); *p_len = k_len - RLC_MD_LEN; } break; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); } RLC_FREE(mask); return result; } #endif /*============================================================================*/ /* Public definitions */ /*============================================================================*/ int cp_rsa_gen(rsa_t pub, rsa_t prv, int bits) { bn_t t, r; int result = RLC_OK; if (pub == NULL || prv == NULL || bits == 0) { return RLC_ERR; } bn_null(t); bn_null(r); RLC_TRY { bn_new(t); bn_new(r); /* Generate different primes p and q. */ do { bn_gen_prime(prv->crt->p, bits / 2); bn_gen_prime(prv->crt->q, bits / 2); } while (bn_cmp(prv->crt->p, prv->crt->q) == RLC_EQ); /* Swap p and q so that p is smaller. */ if (bn_cmp(prv->crt->p, prv->crt->q) != RLC_LT) { bn_copy(t, prv->crt->p); bn_copy(prv->crt->p, prv->crt->q); bn_copy(prv->crt->q, t); } /* n = pq. */ bn_mul(pub->crt->n, prv->crt->p, prv->crt->q); bn_copy(prv->crt->n, pub->crt->n); bn_sub_dig(prv->crt->p, prv->crt->p, 1); bn_sub_dig(prv->crt->q, prv->crt->q, 1); /* phi(n) = (p - 1)(q - 1). 
*/ bn_mul(t, prv->crt->p, prv->crt->q); bn_set_2b(pub->e, 16); bn_add_dig(pub->e, pub->e, 1); #if !defined(CP_CRT) /* d = e^(-1) mod phi(n). */ bn_gcd_ext(r, prv->d, NULL, pub->e, t); if (bn_sign(prv->d) == RLC_NEG) { bn_add(prv->d, prv->d, t); } if (bn_cmp_dig(r, 1) == RLC_EQ) { /* Restore p and q. */ bn_add_dig(prv->crt->p, prv->crt->p, 1); bn_add_dig(prv->crt->q, prv->crt->q, 1); result = RLC_OK; } #else /* d = e^(-1) mod phi(n). */ bn_gcd_ext(r, prv->d, NULL, pub->e, t); if (bn_sign(prv->d) == RLC_NEG) { bn_add(prv->d, prv->d, t); } if (bn_cmp_dig(r, 1) == RLC_EQ) { /* dP = d mod (p - 1). */ bn_mod(prv->crt->dp, prv->d, prv->crt->p); /* dQ = d mod (q - 1). */ bn_mod(prv->crt->dq, prv->d, prv->crt->q); /* Restore p and q. */ bn_add_dig(prv->crt->p, prv->crt->p, 1); bn_add_dig(prv->crt->q, prv->crt->q, 1); /* qInv = q^(-1) mod p. */ bn_mod_inv(prv->crt->qi, prv->crt->q, prv->crt->p); result = RLC_OK; } #endif /* CP_CRT */ } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); bn_free(r); } return result; } int cp_rsa_enc(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t pub) { bn_t m, eb; int size, pad_len, result = RLC_OK; bn_null(m); bn_null(eb); size = bn_size_bin(pub->crt->n); if (pub == NULL || in_len <= 0 || in_len > (size - RSA_PAD_LEN)) { return RLC_ERR; } RLC_TRY { bn_new(m); bn_new(eb); bn_zero(m); bn_zero(eb); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #endif bn_read_bin(m, in, in_len); bn_add(eb, eb, m); #if CP_RSAPD == PKCS2 pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC_FIN); #endif bn_mxp(eb, eb, pub->e, pub->crt->n); if (size <= *out_len) { *out_len = size; memset(out, 0, *out_len); bn_write_bin(out, size, eb); } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { result = RLC_ERR; } 
RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_dec(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t prv) { bn_t m, eb; int size, pad_len, result = RLC_OK; bn_null(m); bn_null(eb); size = bn_size_bin(prv->crt->n); if (prv == NULL || in_len != size || in_len < RSA_PAD_LEN) { return RLC_ERR; } RLC_TRY { bn_new(m); bn_new(eb); bn_read_bin(eb, in, in_len); #if !defined(CP_CRT) bn_mxp(eb, eb, prv->d, prv->crt->n); #else bn_copy(m, eb); #if MULTI == OPENMP omp_set_num_threads(CORES); #pragma omp parallel copyin(core_ctx) firstprivate(prv) { #pragma omp sections { #pragma omp section { #endif /* m1 = c^dP mod p. */ bn_mxp(eb, eb, prv->crt->dp, prv->crt->p); #if MULTI == OPENMP } #pragma omp section { #endif /* m2 = c^dQ mod q. */ bn_mxp(m, m, prv->crt->dq, prv->crt->q); #if MULTI == OPENMP } } } #endif /* m1 = m1 - m2 mod p. */ bn_sub(eb, eb, m); while (bn_sign(eb) == RLC_NEG) { bn_add(eb, eb, prv->crt->p); } bn_mod(eb, eb, prv->crt->p); /* m1 = qInv(m1 - m2) mod p. */ bn_mul(eb, eb, prv->crt->qi); bn_mod(eb, eb, prv->crt->p); /* m = m2 + m1 * q. */ bn_mul(eb, eb, prv->crt->q); bn_add(eb, eb, m); #endif /* CP_CRT */ if (bn_cmp(eb, prv->crt->n) != RLC_LT) { result = RLC_ERR; } #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #endif size = size - pad_len; if (size <= *out_len) { memset(out, 0, size); bn_write_bin(out, size, eb); *out_len = size; } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_sig(uint8_t *sig, int *sig_len, uint8_t *msg, int msg_len, int hash, rsa_t prv) { bn_t m, eb; int pad_len, size, result = RLC_OK; uint8_t h[RLC_MD_LEN]; if (prv == NULL || msg_len < 0) { return RLC_ERR; } pad_len = (!hash ? 
RLC_MD_LEN : msg_len); #if CP_RSAPD == PKCS2 size = bn_bits(prv->crt->n) - 1; size = (size / 8) + (size % 8 > 0); if (pad_len > (size - 2)) { return RLC_ERR; } #else size = bn_size_bin(prv->crt->n); if (pad_len > (size - RSA_PAD_LEN)) { return RLC_ERR; } #endif bn_null(m); bn_null(eb); RLC_TRY { bn_new(m); bn_new(eb); bn_zero(m); bn_zero(eb); int operation = (!hash ? RSA_SIG : RSA_SIG_HASH); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #endif if (!hash) { md_map(h, msg, msg_len); bn_read_bin(m, h, RLC_MD_LEN); bn_add(eb, eb, m); } else { bn_read_bin(m, msg, msg_len); bn_add(eb, eb, m); } #if CP_RSAPD == PKCS2 pad_pkcs2(eb, &pad_len, bn_bits(prv->crt->n), size, RSA_SIG_FIN); #endif bn_copy(m, eb); #if !defined(CP_CRT) bn_mxp(eb, eb, prv->d, prv->crt->n); #else /* CP_CRT */ #if MULTI == OPENMP omp_set_num_threads(CORES); #pragma omp parallel copyin(core_ctx) firstprivate(prv) { #pragma omp sections { #pragma omp section { #endif /* m1 = c^dP mod p. */ bn_mxp(eb, eb, prv->crt->dp, prv->crt->p); #if MULTI == OPENMP } #pragma omp section { #endif /* m2 = c^dQ mod q. */ bn_mxp(m, m, prv->crt->dq, prv->crt->q); #if MULTI == OPENMP } } } #endif /* m1 = m1 - m2 mod p. */ bn_sub(eb, eb, m); while (bn_sign(eb) == RLC_NEG) { bn_add(eb, eb, prv->crt->p); } bn_mod(eb, eb, prv->crt->p); /* m1 = qInv(m1 - m2) mod p. */ bn_mul(eb, eb, prv->crt->qi); bn_mod(eb, eb, prv->crt->p); /* m = m2 + m1 * q. 
*/ bn_mul(eb, eb, prv->crt->q); bn_add(eb, eb, m); bn_mod(eb, eb, prv->crt->n); #endif /* CP_CRT */ size = bn_size_bin(prv->crt->n); if (size <= *sig_len) { memset(sig, 0, size); bn_write_bin(sig, size, eb); *sig_len = size; } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { RLC_THROW(ERR_CAUGHT); } RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_ver(uint8_t *sig, int sig_len, uint8_t *msg, int msg_len, int hash, rsa_t pub) { bn_t m, eb; int size, pad_len, result; uint8_t *h1 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN) + 8); uint8_t *h2 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN)); /* We suppose that the signature is invalid. */ result = 0; if (h1 == NULL || h2 == NULL) { RLC_FREE(h1); RLC_FREE(h2); return 0; } if (pub == NULL || msg_len < 0) { return 0; } pad_len = (!hash ? RLC_MD_LEN : msg_len); #if CP_RSAPD == PKCS2 size = bn_bits(pub->crt->n) - 1; if (size % 8 == 0) { size = size / 8 - 1; } else { size = bn_size_bin(pub->crt->n); } if (pad_len > (size - 2)) { return 0; } #else size = bn_size_bin(pub->crt->n); if (pad_len > (size - RSA_PAD_LEN)) { return 0; } #endif bn_null(m); bn_null(eb); RLC_TRY { bn_new(m); bn_new(eb); bn_read_bin(eb, sig, sig_len); bn_mxp(eb, eb, pub->e, pub->crt->n); int operation = (!hash ? RSA_VER : RSA_VER_HASH); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, bn_bits(pub->crt->n), size, operation) == RLC_OK) { #endif #if CP_RSAPD == PKCS2 memset(h1, 0, 8); if (!hash) { md_map(h1 + 8, msg, msg_len); md_map(h2, h1, RLC_MD_LEN + 8); memset(h1, 0, RLC_MD_LEN); bn_write_bin(h1, size - pad_len, eb); /* Everything went ok, so signature status is changed. 
*/ result = util_cmp_const(h1, h2, RLC_MD_LEN); } else { memcpy(h1 + 8, msg, msg_len); md_map(h2, h1, RLC_MD_LEN + 8); memset(h1, 0, msg_len); bn_write_bin(h1, size - pad_len, eb); /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, h2, msg_len); } #else memset(h1, 0, RLC_MAX(msg_len, RLC_MD_LEN)); bn_write_bin(h1, size - pad_len, eb); if (!hash) { md_map(h2, msg, msg_len); /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, h2, RLC_MD_LEN); } else { /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, msg, msg_len); } #endif result = (result == RLC_EQ ? 1 : 0); } else { result = 0; } } RLC_CATCH_ANY { result = 0; } RLC_FINALLY { bn_free(m); bn_free(eb); RLC_FREE(h1); RLC_FREE(h2); } return result; }
GB_binop__isle_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isle_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isle_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_fp32)
// A*D function (colscale):         GB (_AxD__isle_fp32)
// D*A function (rowscale):         GB (_DxB__isle_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_fp32)
// C=scalar+B                       GB (_bind1st__isle_fp32)
// C=scalar+B'                      GB (_bind1st_tran__isle_fp32)
// C=A+scalar                       GB (_bind2nd__isle_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__isle_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij <= bij)
// (ISLE: "is less than or equal"; the boolean result is stored as float)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_FP32 || GxB_NO_ISLE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This variant does not exist for ISLE (the accum op must be a monoid-like
// operator), so the function below is compiled out with #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// All work is done by the included template, driven by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time: fall back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    // eWiseUnion substitutes alpha/beta for entries missing from A or B
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isle_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_sgemm_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Forward declarations of the runtime-dispatched variants of this kernel,
// compiled in sibling translation units with stronger ISA flags.  Only
// declared when this TU itself is NOT already built with those ISAs.
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
void im2col_sgemm_pack1to4_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif

#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
void im2col_sgemm_pack1to4_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif

#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
void im2col_sgemm_pack1to4_int8_sse_avx2(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif

#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
void im2col_sgemm_pack1to4_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#endif

// int8 GEMM for convolution: bottom_im2col (w=size, h=maxk, c=inch, elempack 1)
// times the pre-interleaved kernel (see the transform function below), writing
// int32 accumulators into top_blob with 4 output channels packed per Mat channel.
// First dispatches to a best-ISA variant at runtime, then permutes the input into
// a cache-friendly tile layout in tmp, then runs the inner-product loops.
static void im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        im2col_sgemm_pack1to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        im2col_sgemm_pack1to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        im2col_sgemm_pack1to4_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        im2col_sgemm_pack1to4_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif
#endif

    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output pixels (outw*outh)
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;       // output channel groups (pack4)

    // permute
    // Repack bottom_im2col so the GEMM reads contiguous tiles: pixels are
    // grouped 4-at-a-time (AVX2 only), then 2, then singles; within a group,
    // 4 input channels are interleaved when inch >= 4.
    Mat tmp;
    if (inch >= 4)
    {
#if __AVX2__
        if (size >= 4)
            tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
        if (size >= 2)
            tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
    }
    else
    {
#if __AVX2__
        if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
        if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
    }
    {
#if __AVX2__
        // groups of 4 pixels (AVX2 path only)
        int remain_size_start = 0;
        int nn_size = size >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            signed char* tmpptr = tmp.channel(i / 4);

            int q = 0;
            for (; q + 3 < inch; q += 4)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
                const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
                const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
                const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // interleave 4 channels x 4 pixels into one 16-byte tile
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img1[0];
                    tmpptr[2] = img2[0];
                    tmpptr[3] = img3[0];
                    tmpptr[4] = img0[1];
                    tmpptr[5] = img1[1];
                    tmpptr[6] = img2[1];
                    tmpptr[7] = img3[1];
                    tmpptr[8] = img0[2];
                    tmpptr[9] = img1[2];
                    tmpptr[10] = img2[2];
                    tmpptr[11] = img3[2];
                    tmpptr[12] = img0[3];
                    tmpptr[13] = img1[3];
                    tmpptr[14] = img2[3];
                    tmpptr[15] = img3[3];

                    tmpptr += 16;
                    img0 += size;   // next k row of the im2col matrix
                    img1 += size;
                    img2 += size;
                    img3 += size;
                }
            }
            for (; q < inch; q++)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img0[1];
                    tmpptr[2] = img0[2];
                    tmpptr[3] = img0[3];

                    tmpptr += 4;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;
#else
        int remain_size_start = 0;
        int nn_size = (size - remain_size_start) >> 1;
#endif

        // groups of 2 pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

#if __AVX2__
            signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            signed char* tmpptr = tmp.channel(i / 2);
#endif

            int q = 0;
            for (; q + 3 < inch; q += 4)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
                const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
                const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
                const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // interleave 4 channels x 2 pixels
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img1[0];
                    tmpptr[2] = img2[0];
                    tmpptr[3] = img3[0];
                    tmpptr[4] = img0[1];
                    tmpptr[5] = img1[1];
                    tmpptr[6] = img2[1];
                    tmpptr[7] = img3[1];

                    tmpptr += 8;
                    img0 += size;
                    img1 += size;
                    img2 += size;
                    img3 += size;
                }
            }
            for (; q < inch; q++)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img0[1];

                    tmpptr += 2;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 1;

        // leftover single pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
#if __AVX2__
            signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif

            int q = 0;
            for (; q + 3 < inch; q += 4)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
                const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
                const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
                const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img1[0];
                    tmpptr[2] = img2[0];
                    tmpptr[3] = img3[0];

                    tmpptr += 4;
                    img0 += size;
                    img1 += size;
                    img2 += size;
                    img3 += size;
                }
            }
            for (; q < inch; q++)
            {
                const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];

                    tmpptr += 1;
                    img0 += size;
                }
            }
        }
    }

    // GEMM: one output-channel group (4 channels) per task; pixels consumed
    // 4 / 2 / 1 at a time to match the permuted layout above.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr0 = top_blob.channel(p);

        int i = 0;
#if __AVX2__
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 4);
            const signed char* kptr0 = kernel.channel(p);

            // nn4: inner-product length covered by the 4-channel tiles,
            // nn1: remaining single-channel tail
            int nn4 = (inch / 4) * maxk;
            int nn1 = (inch % 4) * maxk;

            __m256i _sum00_12 = _mm256_setzero_si256();
            __m256i _sum20_32 = _mm256_setzero_si256();

            if (nn4 > 0)
            {
#if __AVXVNNI__ || __AVX512VNNI__
                __m256i _sum10_02 = _mm256_setzero_si256();
                __m256i _sum30_22 = _mm256_setzero_si256();
#else
                __m256i _sum10_02 = _mm256_setzero_si256();
                __m256i _sum01_13 = _mm256_setzero_si256();
                __m256i _sum11_03 = _mm256_setzero_si256();
                __m256i _sum30_22 = _mm256_setzero_si256();
                __m256i _sum21_33 = _mm256_setzero_si256();
                __m256i _sum31_23 = _mm256_setzero_si256();
#endif

                int j = 0;
                for (; j < nn4; j++)
                {
                    // 4 pixels x 4 input channels of int8, widened to int16
                    __m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
                    __m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
                    __m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
                    __m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));

                    // 16 int8 weights: 4 output channels x 4 input channels
                    __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                    __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);

                    // 78 == _MM_SHUFFLE(1, 0, 3, 2): swap 128-bit halves
                    __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
                    __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                    _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
                    _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
                    _sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
                    _sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
                    // emulate a widening multiply-add with mullo/mulhi pairs
                    __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                    __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                    __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                    __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
                    __m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16);
                    __m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16);
                    __m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16);
                    __m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16);

                    _sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                    _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                    _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                    _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
                    _sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31));
                    _sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21));
                    _sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31));
                    _sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21));
#endif

                    tmpptr += 16;
                    kptr0 += 16;
                }

                // fold the lane-rotated partial sums back into pixel order
#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
                _sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);

                _sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
                _sum20_32 = _mm256_permute4x64_epi64(_sum20_32, _MM_SHUFFLE(2, 1, 3, 0));
#else
                // transpose 4x8
                {
                    __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
                    _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
                    _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
                    _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
                    _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                    _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                    _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                    _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
                }
                {
                    __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22);
                    _tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23);
                    _tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22);
                    _tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23);
                    _sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                    _sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                    _sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                    _sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
                }

                _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
                _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
                _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);

                _sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22);
                _sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23);
                _sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33);

                __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
                _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
                _sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#endif
            }

            __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
            __m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
            __m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
            __m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);

            // scalar-channel tail: one input channel, 4 pixels at a time
            int j = 0;
            for (; j < nn1; j++)
            {
                __m128i _val01 = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
                __m128i _val23 = _mm_set_epi16(tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[2], tmpptr[2], tmpptr[2], tmpptr[2]);

                __m128i _w0123 = _mm_set_epi16(kptr0[3], kptr0[2], kptr0[1], kptr0[0], kptr0[3], kptr0[2], kptr0[1], kptr0[0]);

                __m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
                __m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
                __m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
                __m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));

                tmpptr += 4;
                kptr0 += 4;
            }

            // 4 pixels x 4 output channels of int32
            _mm_storeu_si128((__m128i*)outptr0, _sum00);
            _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
            _mm_storeu_si128((__m128i*)(outptr0 + 8), _sum20);
            _mm_storeu_si128((__m128i*)(outptr0 + 12), _sum30);
            outptr0 += 16;
        }
#endif
        for (; i + 1 < size; i += 2)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2);
#endif
            const signed char* kptr0 = kernel.channel(p);

            int nn4 = (inch / 4) * maxk;
            int nn1 = (inch % 4) * maxk;

#if __AVX2__
            __m256i _sum00_12 = _mm256_setzero_si256();
#else
            __m128i _sum00 = _mm_setzero_si128();
            __m128i _sum10 = _mm_setzero_si128();
#endif

            if (nn4 > 0)
            {
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
                __m256i _sum10_02 = _mm256_setzero_si256();
#else
                __m256i _sum10_02 = _mm256_setzero_si256();
                __m256i _sum01_13 = _mm256_setzero_si256();
                __m256i _sum11_03 = _mm256_setzero_si256();
#endif
#else
#if __XOP__
                __m128i _sum01 = _mm_setzero_si128();
                __m128i _sum11 = _mm_setzero_si128();
#else
                __m128i _sum01 = _mm_setzero_si128();
                __m128i _sum02 = _mm_setzero_si128();
                __m128i _sum03 = _mm_setzero_si128();
                __m128i _sum11 = _mm_setzero_si128();
                __m128i _sum12 = _mm_setzero_si128();
                __m128i _sum13 = _mm_setzero_si128();
#endif
#endif

                int j = 0;
                for (; j < nn4; j++)
                {
#if __AVX2__
                    __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                    __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
                    _val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));

                    __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                    __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);

                    // 78 == _MM_SHUFFLE(1, 0, 3, 2): swap 128-bit halves
                    __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                    _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
                    _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
                    __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                    __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                    __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                    __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);

                    _sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                    _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                    _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                    _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
#endif
#else
                    __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                    _val01 = _mm_cvtepi8_epi16(_val01);
#else
                    // manual sign extension int8 -> int16
                    __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                    _val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif

                    __m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
                    __m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));

                    __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                    __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                    __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                    __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);

#if __XOP__
                    _sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
                    _sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
                    _sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
                    _sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
#else
                    __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                    __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                    __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                    __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
                    __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                    __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
                    __m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
                    __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);

                    _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                    _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00));
                    _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01));
                    _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01));
                    _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
                    _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10));
                    _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11));
                    _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11));
#endif
#endif
                    tmpptr += 8;
                    kptr0 += 16;
                }

#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);

                _sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
#else
                // transpose 4x8
                {
                    __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
                    _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
                    _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
                    _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
                    _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                    _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                    _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                    _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
                }

                _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
                _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
                _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);

                __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
                _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#endif
#else
#if __XOP__
                _sum00 = _mm_hadd_epi32(_sum00, _sum01);
                _sum10 = _mm_hadd_epi32(_sum10, _sum11);
#else
                // transpose 4x4
                {
                    __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
                    _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
                    _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
                    _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
                    _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                    _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                    _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                    _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
                }
                {
                    __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
                    _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
                    _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
                    _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
                    _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                    _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                    _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                    _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
                }

                _sum00 = _mm_add_epi32(_sum00, _sum01);
                _sum02 = _mm_add_epi32(_sum02, _sum03);
                _sum10 = _mm_add_epi32(_sum10, _sum11);
                _sum12 = _mm_add_epi32(_sum12, _sum13);

                _sum00 = _mm_add_epi32(_sum00, _sum02);
                _sum10 = _mm_add_epi32(_sum10, _sum12);
#endif
#endif
            }

#if __AVX2__
            __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
            __m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif

            int j = 0;
            for (; j < nn1; j++)
            {
                __m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);

                __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
                _w0123 = _mm_cvtepi8_epi16(_w0123);
#else
                __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
                _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
                _w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));

                __m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
                __m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));

                tmpptr += 2;
                kptr0 += 4;
            }

            // 2 pixels x 4 output channels of int32
            _mm_storeu_si128((__m128i*)outptr0, _sum00);
            _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
            outptr0 += 8;
        }
        for (; i < size; i++)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
            const signed char* kptr0 = kernel.channel(p);

            int nn4 = (inch / 4) * maxk;
            int nn1 = (inch % 4) * maxk;

            __m128i _sum0 = _mm_setzero_si128();

            if (nn4 > 0)
            {
                __m128i _sum1 = _mm_setzero_si128();
                __m128i _sum2 = _mm_setzero_si128();
                __m128i _sum3 = _mm_setzero_si128();

                int j = 0;
                for (; j < nn4; j++)
                {
                    __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                    __m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
                    __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                    __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
                    _val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));

                    __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                    __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                    __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                    __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);

                    __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                    __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                    __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                    __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);

                    _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
                    _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
                    _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
                    _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));

                    tmpptr += 4;
                    kptr0 += 16;
                }

                // transpose 4x4
                {
                    __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
                    _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
                    _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
                    _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
                    _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                    _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                    _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                    _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
                }

                _sum0 = _mm_add_epi32(_sum0, _sum1);
                _sum2 = _mm_add_epi32(_sum2, _sum3);
                _sum0 = _mm_add_epi32(_sum0, _sum2);
            }

            int j = 0;
            for (; j < nn1; j++)
            {
                __m128i _val = _mm_set1_epi16(tmpptr[0]);

                __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
                _w0123 = _mm_cvtepi8_epi16(_w0123);
#else
                __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
                _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif

                __m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
                __m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));

                tmpptr += 1;
                kptr0 += 4;
            }

            // 1 pixel x 4 output channels of int32
            _mm_storeu_si128((__m128i*)outptr0, _sum0);
            outptr0 += 4;
        }
    }
}

// Reorder the convolution weights from the natural maxk-inch-outch layout into
// the interleaved layout the GEMM above expects (4 input channels x 4 output
// channels per 16-byte group, with a per-channel tail when inch % 4 != 0).
// NOTE(review): outch is assumed to be a multiple of 4 here (the q-loop only
// covers q + 3 < outch) — presumably guaranteed by the pack1to4 caller; verify.
static void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 4a-4b-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    if (inch >= 4)
        kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4, (size_t)1u);
    else
        kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)1u);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        int p = 0;
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}

// int8 convolution = im2col (with stride/dilation) + the packed GEMM above.
// bottom_blob is elempack-1 int8; top_blob receives int32 sums in pack4.
static void convolution_im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // gap advances sptr from the end of one output row to the start of
        // the next input row (stride_h rows down, back to column 0)
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];
                            ptr[2] = sptr[stride_w * 2];
                            ptr[3] = sptr[stride_w * 3];

                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];

                            sptr += stride_w * 2;
                            ptr += 2;
                        }
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
matfft.c
#include "matrix.h"

/** \brief Computes fast Fourier transform
 *
 * \param[in] C Complex data matrix stack
 * \param[in] dir FFT direction (MAT_FFT2_FORWARD/MAT_FFT2_BACKWARD)
 * \param[in] result Matrix stack to store the result
 * \return Transformed matrix stack
 *
 * c[0] holds the real part and c[1] the imaginary part.  Both matrix
 * dimensions must be powers of two (checked via __mat_powerof2).  The 2-D
 * transform is computed as row FFTs, a transpose, column FFTs (as rows of
 * the transposed matrices), and a final transpose back into result.
 */
MATSTACK mat_fft2(MATSTACK c, int dir, MATSTACK result)
{
    int i, j, m, n, mm, nn, twopm;
    MATRIX real = NULL, imag = NULL, realt = NULL, imagt = NULL;
    m = MatCol(c[0]);
    n = MatRow(c[0]);
    if(result==NULL)
        if((result = matstack_creat(2))==NULL)
            matstack_error(MATSTACK_MALLOC);
    /* work on copies so the input stack is left untouched */
    real = mat_copy(c[0], NULL);
    imag = mat_copy(c[1], NULL);
    if(real==NULL || imag==NULL) return(matstack_error(MATSTACK_MALLOC));
    /* mm/nn receive log2 of the dimensions; twopm must equal the dimension */
    if(!__mat_powerof2(m, &mm, &twopm) || twopm!=m) gen_error(GEN_MATH_ERROR);
    if(!__mat_powerof2(n, &nn, &twopm) || twopm!=n) gen_error(GEN_MATH_ERROR);
    /* 1-D FFT of every row (length m), rows are independent */
    #pragma omp parallel for
    for(j=0; j<n; ++j) __mat_fft(dir, mm, real[j], imag[j]);
    realt = mat_tran(real, NULL);
    imagt = mat_tran(imag, NULL);
    /* 1-D FFT of every original column (length n), via the transpose */
    #pragma omp parallel for
    for(i=0; i<m; ++i) __mat_fft(dir, nn, realt[i], imagt[i]);
    mat_free(real);
    mat_free(imag);
    result[0] = mat_tran(realt, result[0]);
    result[1] = mat_tran(imagt, result[1]);
    mat_free(realt);
    mat_free(imagt);
    return result;
}

/** \cond HIDDEN_SYMBOLS */
/* In-place radix-2 Cooley-Tukey FFT of length 2^m on the complex signal
 * (x, y) = (real, imaginary).  dir selects forward or backward; the backward
 * transform is normalized by 1/nn at the end.  Twiddle factors are generated
 * by successive half-angle recurrences (the sqrt((1±c1)/2) updates). */
int __mat_fft(int dir, int m, mtype *x, mtype *y)
{
    long nn, i, i1, j, k, i2, l, l1, l2;
    mtype c1, c2, tx, ty, t1, t2, u1, u2, z;
    nn = 1;
    for(i=0; i<m; ++i) nn *= 2;    /* nn = 2^m */
    /* bit-reversal permutation of the input */
    i2 = nn>>1;
    j = 0;
    for(i=0; i<nn-1; ++i)
    {
        if(i<j)
        {
            tx = x[i];
            ty = y[i];
            x[i] = x[j];
            y[i] = y[j];
            x[j] = tx;
            y[j] = ty;
        }
        /* increment j as a bit-reversed counter */
        k = i2;
        while(k<=j)
        {
            j -= k;
            k >>= 1;
        }
        j += k;
    }
    /* Danielson-Lanczos butterfly stages: l2 doubles each pass */
    c1 = -1.0;
    c2 = 0.0;
    l2 = 1;
    for(l=0; l<m; ++l)
    {
        l1 = l2;
        l2 <<= 1;
        u1 = 1.0;   /* (u1,u2): current twiddle factor, starts at 1+0i */
        u2 = 0.0;
        for(j=0; j<l1; ++j)
        {
            for(i=j; i<nn; i+=l2)
            {
                i1 = i+l1;
                /* butterfly: t = u * x[i1]; x[i1] = x[i]-t; x[i] += t */
                t1 = u1*x[i1]-u2*y[i1];
                t2 = u1*y[i1]+u2*x[i1];
                x[i1] = x[i]-t1;
                y[i1] = y[i]-t2;
                x[i] += t1;
                y[i] += t2;
            }
            /* rotate the twiddle factor by (c1,c2) */
            z = u1*c1-u2*c2;
            u2 = u1*c2+u2*c1;
            u1 = z;
        }
        /* half-angle step for the next stage's rotation */
        c2 = (mtype)sqrt((1.0f-c1)/2.0f);
        if(dir == MAT_FFT2_FORWARD) c2 = -c2;
        c1 = (mtype)sqrt((1.0f+c1)/2.0f);
    }
    /* 1/N normalization on the inverse transform only */
    if(dir==MAT_FFT2_BACKWARD)
        for(i=0; i<nn; i++)
        {
            x[i] /= (mtype)nn;
            y[i] /= (mtype)nn;
        }
    return 0;
}
/** \endcond */
GB_positional_op_ijp.c
//------------------------------------------------------------------------------ // GB_positional_op_ijp: C = positional_op (A), depending j //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // TODO: use this kernel for GrB_extractTuples, to create J array. // A can be jumbled. If A is jumbled, so is C. // if A and C are bitmap, not all of Cx need to be written to, but it's faster // just to write to all of it. C->b is copied from A->b in the caller. { //-------------------------------------------------------------------------- // slice the entries for each task //-------------------------------------------------------------------------- GB_WERK_DECLARE (A_ek_slicing, int64_t) ; int A_ntasks, A_nthreads ; GB_SLICE_MATRIX (A, 32, chunk) ; //-------------------------------------------------------------------------- // Cx = positional_op (A) //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1) for (tid = 0 ; tid < A_ntasks ; tid++) { // if kfirst > klast then task tid does no work at all int64_t kfirst = kfirst_Aslice [tid] ; int64_t klast = klast_Aslice [tid] ; //---------------------------------------------------------------------- // C(:,kfirst:klast) = op (A(:,kfirst:klast)) //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // find the part of A(:,k) and Cx to be operated on by this task //------------------------------------------------------------------ int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, tid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; 
//------------------------------------------------------------------ // C(:,j) = op (A(:,j)) //------------------------------------------------------------------ for (int64_t p = pA_start ; p < pA_end ; p++) { // Cx [p] = op (A (i,j)) GB_APPLY (p) ; } } } //-------------------------------------------------------------------------- // free workspace //-------------------------------------------------------------------------- GB_WERK_POP (A_ek_slicing, int64_t) ; } #undef GB_APPLY
AzureAD_fmt_plug.c
/* * This software is Copyright (c) 2015 JimF, <jfoug at openwall.com>, and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Azure ActiveDirectory, V1 cracker patch for JtR. * * Algorithm: https://www.dsinternals.com/en/how-azure-active-directory-connect-syncs-passwords/ * * PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_AzureAD; #elif FMT_REGISTERS_H john_register_one(&fmt_AzureAD); #else #include <string.h> #include "arch.h" #include "md4.h" #include "pbkdf2_hmac_sha256.h" #include "common.h" #include "formats.h" #include "base64_convert.h" #include "AzureAD_common.h" #include "unicode.h" #include "johnswap.h" //#undef SIMD_COEF_32 //#undef SIMD_PARA_SHA256 #ifdef _OPENMP #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 64 // FIXME #endif #else #ifndef OMP_SCALE #define OMP_SCALE 64 // FIXME #endif #endif #include <omp.h> #endif #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "AzureAD" #define FORMAT_NAME "" #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA256) #else #define NBKEYS 1 #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 // For now, I will do md4() oSSL type for all passwords. 
There is so much // other overhead that adding the complexity to do SIMD md4 will gain us // almost nothing #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static char (*saved_nt)[64]; static int dirty; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_WORD); saved_nt = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_nt), MEM_ALIGN_WORD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*crypt_out), MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_nt); MEM_FREE(saved_key); } static void *salt(char *ciphertext) { char Buf[120], *ctcopy=Buf; char *p; static struct AzureAD_custom_salt cs; memset(&cs, 0, sizeof(cs)); strncpy(Buf, ciphertext, 119); Buf[119] = 0; ctcopy += TAG_LENGTH; p = strtokm(ctcopy, ","); cs.salt_len = strlen(p)/2; base64_convert(p, e_b64_hex, cs.salt_len*2, cs.salt, e_b64_raw, cs.salt_len, 0, 0); p = strtokm(NULL, ","); cs.iterations = atoi(p); p = strtokm(Buf, ","); strncpy(cs.version, p, 8); cs.version[7] = 0; return (void *)&cs; } static void set_salt(void *salt) { AzureAD_cur_salt = (struct AzureAD_custom_salt *)salt; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); dirty = 1; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { // * 
PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32) // Trivial for now. Can optimized later. UTF16 Buf[PLAINTEXT_LENGTH+1]; unsigned char hash[16], hex[33]; int len, cnt, i; MD4_CTX ctx; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT]; union { uint32_t *pout[MAX_KEYS_PER_CRYPT]; unsigned char *poutc; } x; cnt = MAX_KEYS_PER_CRYPT; #else cnt = 1; #endif if (dirty) for (i = 0; i < cnt; ++i) { len = enc_to_utf16(Buf, PLAINTEXT_LENGTH, (UTF8*)saved_key[index+i], strlen(saved_key[index+i])); if (len < 0) len = 0; MD4_Init(&ctx); MD4_Update(&ctx, Buf, len*2); MD4_Final(hash, &ctx); base64_convert(hash, e_b64_raw, 16, hex, e_b64_hex, sizeof(hex), flg_Base64_HEX_UPCASE, 0); for (len = 0; len < 32; ++len) saved_nt[index+i][len<<1] = hex[len]; } #ifdef SIMD_COEF_32 for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 64; pin[i] = (unsigned char*)saved_nt[i+index]; x.pout[i] = crypt_out[i+index]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, &(x.poutc), 32, 0); #else pbkdf2_sha256((unsigned char *)saved_nt[index], 64, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0); #endif } dirty = 0; return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], 4)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_AzureAD = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | 
FMT_UTF8, { NULL }, { FORMAT_TAG }, AzureAD_common_tests }, { init, done, fmt_default_reset, fmt_default_prepare, AzureAD_common_valid, AzureAD_common_split, AzureAD_common_get_binary, salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
mixed_generic_criteria.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela, Riccardo Rossi, Carlos Roig and Ruben Zorrilla // #ifndef KRATOS_MIXED_GENERIC_CRITERIA_H #define KRATOS_MIXED_GENERIC_CRITERIA_H // System includes // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "convergence_criteria.h" // Application includes namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Convergence criteria for mixed vector-scalar problems. /** This class implements a convergence control based on a nodal vector variable and a nodal scalar variable. The error is evaluated separately for each of them, and relative and absolute tolerances for both must be specified. */ template< class TSparseSpace, class TDenseSpace > class MixedGenericCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(MixedGenericCriteria); typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef MixedGenericCriteria< TSparseSpace, TDenseSpace > ClassType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef std::vector<std::tuple<const VariableData*, TDataType, TDataType>> ConvergenceVariableListType; typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /// Constructor. explicit MixedGenericCriteria() : BaseType(), mVariableSize(0) { } /** * @brief Default constructor. 
(with parameters) * @param ThisParameters The configuration parameters */ explicit MixedGenericCriteria(Kratos::Parameters ThisParameters) : MixedGenericCriteria(GenerateConvergenceVariableListFromParameters(ThisParameters)) { } /** * @brief Construct a new Mixed Generic Criteria object * Construct the mixed generic convergence criteria from a convergence variables list. * The convergence variable list contains for each variable the variable itself as well as the corresponding relative and absolute tolerances. * @param rConvergenceVariablesList List containing tuples with the convergence variables to be checked. The tuples are set as <Variable, relative tolerance, absolute tolerance> */ MixedGenericCriteria(const ConvergenceVariableListType& rConvergenceVariablesList) : BaseType() , mVariableSize([&] (const ConvergenceVariableListType& rList) -> int {return rList.size();} (rConvergenceVariablesList)) , mVariableDataVector([&] (const ConvergenceVariableListType& rList) -> std::vector<const VariableData*> { int i = 0; std::vector<const VariableData*> aux_vect(mVariableSize); for (const auto &r_tup : rList) { aux_vect[i++] = std::get<0>(r_tup); } return aux_vect; } (rConvergenceVariablesList)) , mRatioToleranceVector([&] (const ConvergenceVariableListType& rList) -> std::vector<TDataType> { int i = 0; std::vector<TDataType> aux_vect(mVariableSize); for (const auto &r_tup : rList) { aux_vect[i++] = std::get<1>(r_tup); } return aux_vect; } (rConvergenceVariablesList)) , mAbsToleranceVector([&] (const ConvergenceVariableListType& rList) -> std::vector<TDataType> { int i = 0; std::vector<TDataType> aux_vect(mVariableSize); for (const auto &r_tup : rList) { aux_vect[i++] = std::get<2>(r_tup); } return aux_vect; } (rConvergenceVariablesList)) , mLocalKeyMap([&] (const ConvergenceVariableListType& rList) -> std::unordered_map<KeyType, KeyType> { KeyType local_key = 0; std::unordered_map<KeyType, KeyType> aux_map; for (const auto &r_tup : rList) { const auto *p_var_data = 
std::get<0>(r_tup); if (aux_map.find(p_var_data->Key()) != aux_map.end()) { KRATOS_ERROR << "Convergence variable " << p_var_data->Name() << " is repeated. Check the input convergence variable list." << std::endl; } else { KRATOS_ERROR_IF(p_var_data->IsComponent()) << "Trying to check convergence with the " << p_var_data->Name() << " component variable. Use the corresponding vector one." << std::endl; aux_map[p_var_data->Key()] = local_key++; } } return aux_map; } (rConvergenceVariablesList)) {} /// Destructor. ~MixedGenericCriteria() override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create(Parameters ThisParameters) const override { return Kratos::make_shared<ClassType>(ThisParameters); } /// Compute relative and absoute error. /** * @param rModelPart Reference to the ModelPart containing the fluid problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param A System matrix (unused) * @param Dx Vector of results (variations on nodal variables) * @param b RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b) override { // Check if we are solving for something if (TSparseSpace::Size(Dx) != 0) { // Calculate the convergence ratio and absolute norms const auto convergence_norms = CalculateConvergenceNorms(rModelPart, rDofSet, Dx); // Output convergence status OutputConvergenceStatus(convergence_norms); // Check convergence return CheckConvergence(convergence_norms); } else { // Case in which all the DOFs are constrained! 
return true; } } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "mixed_generic_criteria"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "MixedGenericCriteria"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} protected: ///@name Protected Static Member Variables ///@{ ///@} ///@name Protected Member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Get the Variable Size object * Get the number of variables to be checked * @return const int Number of variables to check */ int GetVariableSize() const { return mVariableSize; } /** * @brief Get the Variable Data Vector object * Get the member vector that stores pointers to the variables to check * @return std::vector<VariableData*> Vector containing pointers to the variables to check */ std::vector<const VariableData*> GetVariableDataVector() const { return mVariableDataVector; } /** * @brief Get the Ratio Tolerance Vector object * Get the member vector containing the ratio tolerances for each variable to check * @return std::vector<TDataType> Vector containing the ratio tolerances */ std::vector<TDataType> GetRatioToleranceVector() const { return mRatioToleranceVector; } /** * @brief Get the Abs Tolerance Vector object * Get the member vector containing the absolute tolerances for each variable to check * @return std::vector<TDataType> Vector containing the absolute tolerances */ std::vector<TDataType> GetAbsToleranceVector() const { return mAbsToleranceVector; } /** * @brief Get the Local Key Map object * Returns a reference to the variable 
key local map * @return std::unordered_map<KeyType, KeyType>& Reference to the local key map */ std::unordered_map<KeyType, KeyType>& GetLocalKeyMap() { return mLocalKeyMap; } /** * @brief Calculate the convergence norms * This method calculates the convergence norms for all the variables to be checked * @param rModelPart Reference to the ModelPart containing the fluid problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rDx Vector of results (variations on nodal variables) * @return std::tuple<std::vector<TDataType>, std::vector<TDataType>> Tuple containing the absolute and relative convergence values */ std::tuple<std::vector<TDataType>, std::vector<TDataType>> CalculateConvergenceNorms( const ModelPart& rModelPart, const DofsArrayType& rDofSet, const TSystemVectorType& rDx) { // Initialize std::vector<int> dofs_count(mVariableSize, 0); std::vector<TDataType> solution_norms_vector(mVariableSize, 0.0); std::vector<TDataType> increase_norms_vector(mVariableSize, 0.0); // Accumulate the norm values GetNormValues(rModelPart, rDofSet, rDx, dofs_count, solution_norms_vector, increase_norms_vector); // Synchronize the norm values const auto& r_data_comm = rModelPart.GetCommunicator().GetDataCommunicator(); auto global_solution_norms_vector = r_data_comm.SumAll(solution_norms_vector); auto global_increase_norms_vector = r_data_comm.SumAll(increase_norms_vector); auto global_dofs_count = r_data_comm.SumAll(dofs_count); // Check division by zero in global solution norms const double zero_tol = 1.0e-12; for(int i = 0; i < mVariableSize; i++) { if (global_solution_norms_vector[i] < zero_tol) { global_solution_norms_vector[i] = 1.0; } } // Calculate the norm values std::vector<TDataType> var_ratio(mVariableSize, 0.0); std::vector<TDataType> var_abs(mVariableSize, 0.0); for(int i = 0; i < mVariableSize; i++) { var_ratio[i] = std::sqrt(global_increase_norms_vector[i] / 
global_solution_norms_vector[i]); var_abs[i] = std::sqrt(global_increase_norms_vector[i]) / static_cast<TDataType>(global_dofs_count[i]); } // Output the ratio and absolute norms as a tuple return std::make_tuple(var_ratio, var_abs); } /** * @brief Method to output the convergence status * This method prints the convergence status to the screen for each one of the checked variables * @param rConvergenceNorms Tuple containing the absolute and relative convergence values */ virtual void OutputConvergenceStatus( const std::tuple<std::vector<TDataType>, std::vector<TDataType>>& rConvergenceNorms) { const auto& var_ratio = std::get<0>(rConvergenceNorms); const auto& var_abs = std::get<1>(rConvergenceNorms); if (this->GetEchoLevel() > 0) { std::ostringstream stringbuf; stringbuf << "CONVERGENCE CHECK:\n"; const int max_length_var_name = (*std::max_element(mVariableDataVector.begin(), mVariableDataVector.end(), [](const VariableData* p_var_data_1, const VariableData* p_var_data_2){ return p_var_data_1->Name().length() < p_var_data_2->Name().length(); }))->Name().length(); for(int i = 0; i < mVariableSize; i++) { const auto r_var_data = mVariableDataVector[i]; const int key_map = mLocalKeyMap[r_var_data->Key()]; const std::string space_str(max_length_var_name-r_var_data->Name().length(), ' '); stringbuf << " " << r_var_data->Name() << space_str <<" : ratio = " << var_ratio[key_map] << "; exp.ratio = " << mRatioToleranceVector[key_map] << " abs = " << var_abs[key_map] << " exp.abs = " << mAbsToleranceVector[key_map] << "\n"; } KRATOS_INFO("") << stringbuf.str(); } } /** * @brief Method to check convergence * This method checks the convergence of the provided norms with the user-defined tolerances * @param rConvergenceNorms Tuple containing the absolute and relative convergence values * @return true Convergence is satisfied * @return false Convergence is not satisfied */ bool CheckConvergence( const std::tuple<std::vector<TDataType>, std::vector<TDataType>>& 
rConvergenceNorms) { bool is_converged = true; const auto& var_ratio = std::get<0>(rConvergenceNorms); const auto& var_abs = std::get<1>(rConvergenceNorms); for (int i = 0; i < mVariableSize; i++) { const auto r_var_data = mVariableDataVector[i]; const int key_map = mLocalKeyMap[r_var_data->Key()]; is_converged &= var_ratio[key_map] <= mRatioToleranceVector[key_map] || var_abs[key_map] <= mAbsToleranceVector[key_map]; } // Note that this check ensures that all the convergence variables fulfil either the relative or the absolute criterion if (is_converged) { KRATOS_INFO_IF("", this->GetEchoLevel() > 0) << "*** CONVERGENCE IS ACHIEVED ***" << std::endl; return true; } else { return false; } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Private Static Member Variables ///@{ ///@} ///@name Private Member Variables ///@{ const int mVariableSize; const std::vector<const VariableData*> mVariableDataVector; const std::vector<TDataType> mRatioToleranceVector; const std::vector<TDataType> mAbsToleranceVector; std::unordered_map<KeyType, KeyType> mLocalKeyMap; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief Get the Norm Values * This function accumulates the solution and increment norm values in the provided arrays. * Note that these arrays are assumed to be already initialized to zero. * @param rModelPart Reference to the ModelPart containing the fluid problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rDx Vector of results (variations on nodal variables) * @param rDofsCount Array containing the number of DOFs per variable * @param rSolutionNormsVector Array containing the solution norms accumulated values for each variable checked * @param rIncreaseNormsVector Array containing the correction norms accumulated values for each variable checked */ virtual void GetNormValues( const ModelPart& rModelPart, const DofsArrayType& rDofSet, const TSystemVectorType& rDx, std::vector<int>& rDofsCount, std::vector<TDataType>& rSolutionNormsVector, std::vector<TDataType>& rIncreaseNormsVector) { int n_dofs = rDofSet.size(); // Loop over Dofs #pragma omp parallel { // Local thread variables int dof_id; TDataType dof_dx; TDataType dof_value; // Local reduction variables std::vector<TDataType> var_solution_norm_reduction(mVariableSize); std::vector<TDataType> var_correction_norm_reduction(mVariableSize); std::vector<int> dofs_counter_reduction(mVariableSize); for (int i = 0; i < mVariableSize; i++) { var_solution_norm_reduction[i] = 0.0; var_correction_norm_reduction[i] = 0.0; dofs_counter_reduction[i] = 0; } #pragma omp for for (int i = 0; i < n_dofs; i++) { auto it_dof = rDofSet.begin() + i; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); dof_value = it_dof->GetSolutionStepValue(0); dof_dx = TSparseSpace::GetValue(rDx, dof_id); const auto &r_current_variable = it_dof->GetVariable(); int var_local_key = mLocalKeyMap[r_current_variable.IsComponent() ? 
r_current_variable.GetSourceVariable().Key() : r_current_variable.Key()]; var_solution_norm_reduction[var_local_key] += dof_value * dof_value; var_correction_norm_reduction[var_local_key] += dof_dx * dof_dx; dofs_counter_reduction[var_local_key]++; } } #pragma omp critical { for (int i = 0; i < mVariableSize; i++) { rDofsCount[i] += dofs_counter_reduction[i]; rSolutionNormsVector[i] += var_solution_norm_reduction[i]; rIncreaseNormsVector[i] += var_correction_norm_reduction[i]; } } } } /** * @brief This method generates the list of variables from Parameters * @param ThisParameters Input parameters * @return List of variables considered as input */ static ConvergenceVariableListType GenerateConvergenceVariableListFromParameters(Kratos::Parameters ThisParameters) { // Iterate over variables ConvergenceVariableListType aux_list; if (!ThisParameters.Has("convergence_variables_list")) return aux_list; Kratos::Parameters convergence_variables_list = ThisParameters["convergence_variables_list"]; for (auto param : convergence_variables_list) { if (param.Has("variable")) { const std::string& r_variable_name = param["variable"].GetString(); // Variable pointer const VariableData* p_variable = KratosComponents<Variable<double>>::Has(r_variable_name) ? dynamic_cast<const VariableData*>(&KratosComponents<Variable<double>>::Get(r_variable_name)) : dynamic_cast<const VariableData*>(&KratosComponents<Variable<array_1d<double, 3>>>::Get(r_variable_name)); // Tolerances const double rel_tol = param.Has("relative_tolerance") ? param["relative_tolerance"].GetDouble() : 1.0e-4; const double abs_tol = param.Has("absolute_tolerance") ? 
param["absolute_tolerance"].GetDouble() : 1.0e-9; // Push back list aux_list.push_back(std::make_tuple(p_variable, rel_tol, abs_tol)); } } return aux_list; } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; ///@} // Kratos classes ///@} // Application group } #endif // KRATOS_MIXED_GENERIC_CRITERIA_H
move_particle_utility.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pablo Becker // #if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED) #define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" /// #include "includes/dof.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" /// #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "convection_diffusion_application.h" #include "convection_particle.h" #include "utilities/openmp_utils.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtilityScalarTransport { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; //typedef Configure::ResultPointerType 
ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector; //typedef Configure::ContactPairType ContactPairType; //typedef Configure::ContainerContactType ContainerContactType; //typedef Configure::IteratorContactType IteratorContactType; //typedef Configure::PointerContactType PointerContactType; //typedef Configure::PointerTypeIterator PointerTypeIterator; KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport); //template<unsigned int TDim> MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles) : mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) , mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) , mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) , mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) , mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable()) { std::cout << "initializing moveparticle utility for scalar transport" << std::endl; Check(); //storing water and air density and their inverses, just in case it is needed for the streamline integration //loop in elements to change their ID to their position in the array. Easier to get information later. //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!! ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ielem->SetId(ii+1); } mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id(); int node_id=0; // we look for the smallest edge. 
could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used) ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator pnode = inodebegin+ii; array_1d<double,3> position_node; double distance=0.0; position_node = pnode->Coordinates(); GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES); //we loop all the nodes to check all the edges const double number_of_neighbours = double(rneigh.size()); for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++) { array_1d<double,3> position_difference; position_difference = inode->Coordinates() - position_node; double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2)); //if (current_distance>distance) // distance=current_distance; distance += current_distance / number_of_neighbours; } //and we save the largest edge. pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance; node_id=pnode->GetId(); } } mlast_node_id=node_id; //we also calculate the element mean size in the same way, for the courant number //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element. 
#pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; double mElemSize; array_1d<double,3> Edge(3,0.0); Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates(); mElemSize = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) mElemSize += Edge[d]*Edge[d]; for (unsigned int i = 2; i < (TDim+1); i++) for(unsigned int j = 0; j < i; j++) { Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates(); double Length = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) Length += Edge[d]*Edge[d]; if (Length < mElemSize) mElemSize = Length; } mElemSize = sqrt(mElemSize); ielem->GetValue(MEAN_SIZE) = mElemSize; } } //matrix containing the position of the 4/15/45 particles that we will seed at the beggining BoundedMatrix<double, 5*(1+TDim), 3 > pos; BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N; int particle_id=0; mnelems = mr_model_part.Elements().size(); std::cout << "about to resize vectors" << std::endl; //setting the right size to the vector containing the particles assigned to each element //particles vector. this vector contains ALL the particles in the simulation. mparticles_vector.resize(mnelems*mmaximum_number_of_particles); //and this vector contains the current number of particles that are in each element (currently zero) mnumber_of_particles_in_elems.resize(mnelems); mnumber_of_particles_in_elems=ZeroVector(mnelems); //when moving the particles, an auxiliary vector is necessary (to store the previous number) mnumber_of_particles_in_elems_aux.resize(mnelems); //each element will have a list of pointers to all the particles that are inside. //this vector contains the pointers to the vector of (particle) pointers of each element. mvector_of_particle_pointers_vectors.resize(mnelems); //int artz; //std::cin >> artz; int i_int=0; //careful! 
it's not the id, but the position inside the array! std::cout << "about to create particles" << std::endl; //now we seed: LOOP IN ELEMENTS //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one moffset=0; //Convection_Particle& firstparticle =mparticles_vector[0]; for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle ); //ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //now we link the mpointers_to_particle_pointers_vectors to the corresponding element //mpointers_to_particle_pointers_vectors(ii) = &particle_pointers; //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half). 
//for(int j=0; j<(mmaximum_number_of_particles*2); j++) // particle_pointers.push_back(&firstparticle); mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 ); ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii]; //int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles = mnumber_of_particles_in_elems[ii]; number_of_particles=0; Geometry< Node<3> >& geom = ielem->GetGeometry(); //unsigned int elem_id = ielem->Id(); //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45 //now we seed the particles in the current element for (unsigned int j = 0; j < pos.size1(); j++) { ++particle_id; Convection_Particle& pparticle = mparticles_vector[particle_id-1]; pparticle.X()=pos(j,0); pparticle.Y()=pos(j,1); pparticle.Z()=pos(j,2); pparticle.GetEraseFlag()=false; float & scalar1= pparticle.GetScalar1(); scalar1=0.0; for (unsigned int k = 0; k < (TDim+1); k++) { scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar); } particle_pointers(j) = &pparticle; number_of_particles++ ; } ++i_int; } m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true. 
KRATOS_WATCH(m_nparticles); //KRATOS_WATCH(mlast_elem_id); mparticle_printing_tool_initialized=false; //std::cin >> artz; } virtual ~MoveParticleUtilityScalarTransport() {} void MountBin() { KRATOS_TRY //copy the elements to a new container, as the list will //be shuffled duringthe construction of the tree ContainerType& rElements = mr_model_part.ElementsArray(); IteratorType it_begin = rElements.begin(); IteratorType it_end = rElements.end(); //const int number_of_elem = rElements.size(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) ); paux.swap(mpBinsObjectDynamic); //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end ); std::cout << "finished mounting Bins" << std::endl; KRATOS_CATCH("") } void CalculateVelOverElemSize() { KRATOS_TRY //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double nodal_weight = 1.0/ (1.0 + double (TDim) ); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<double, 3 >vector_mean_velocity=ZeroVector(3); for (unsigned int i=0; i != (TDim+1) ; i++) vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar); vector_mean_velocity *= nodal_weight; const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) ); ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / ( 
ielem->GetValue(MEAN_SIZE) ); } } KRATOS_CATCH("") } //name self explained void ResetBoundaryConditions() { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(mUnknownVar)) { inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1); } } } KRATOS_CATCH("") } void CalculateDeltaVariables() { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ; } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; 
kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
        KRATOS_CATCH("")
    }

    //to move all the particles across the streamlines. heavy task!
    // Convects every active particle along the mesh velocity field (mVelocityVar)
    // and re-bins the particles into the elements where they end up.
    // The per-element pointer arrays have twice the needed size: one half is read
    // (offset) and the other half written (post_offset), alternating every step.
    void MoveParticles()
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
        //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
        //KRATOS_WATCH(offset)
        bool even_timestep;
        if (offset!=0) even_timestep=false;
        else even_timestep=true;

        const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
        //KRATOS_WATCH(post_offset)

        double delta_t = CurrentProcessInfo[DELTA_TIME];

        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;

        //double integration_distance= 2.0;

        // substep bounds used by the streamline integration
        max_nsubsteps = 10;
        max_substep_dt=delta_t/double(max_nsubsteps);

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
                // stash the current count in the aux array and zero the live one;
                // the convection loop below refills the live counters.
                mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
                mnumber_of_particles_in_elems[ii]=0;
                //we reset the local vectors for a faster access;
            }
        }
        std::cout << "convecting particles" << std::endl;
        //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)

        #pragma omp barrier

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            // per-thread scratch: spatial-search result buffer and the list of
            // elements crossed by the previous particle (reused as a search hint)
            ResultContainerType results(max_results);
            GlobalPointersVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
                //for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
                //{
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);

                if ( (results.size()) !=max_results)
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

                for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
                {
                    Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag==false){
                        // updates pparticle position and pcurrent_element in place
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //removed N from the arguments: not needed since it ALWAYS starts at a node and the final local coordinates are irrelevant

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
                        //int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);

                        if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
                        {
                            {
                                ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);

                                // destination counter is shared between threads, so the
                                // re-check + append must be atomic
                                #pragma omp critical
                                {
                                    if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                    {
                                        current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                        number_of_particles_in_current_elem++ ;
                                        if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
                                            KRATOS_WATCH("MAL");
                                    }
                                    else
                                        pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                    }
                }
            }
        }

        /*
        //now we pass info from the local vector to the elements:
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
                //old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
            }
        }
        */

        //after having changed everything we change the status of the modd_timestep flag:
        moffset = post_offset;;

        KRATOS_CATCH("")
    }

    // Projects the particle scalar onto the mesh nodes (explicit version):
    // shape-function-squared weighted average of the particles in each element,
    // accumulated into mProjectionVar with YP as the weight sum.
    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t =CurrentProcessInfo[DELTA_TIME];
        // NOTE(review): threshold evaluates to 0.0 — presumably a disabled cutoff; confirm intent
        const double threshold= 0.0/(double(TDim)+1.0);

        std::cout << "projecting info to mesh" << std::endl;

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //KRATOS_WATCH(offset)
        //(flag managed only by MoveParticles

        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // zero the accumulators (projection value and weight sum) on all nodes
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                // flat xyz coordinates of the element's nodes + local accumulators
                array_1d<double,3*(TDim+1)> nodes_positions;
                array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
                array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
                //array_1d<double,(TDim+1)> weighting_inverse_divisor;

                Geometry<Node<3> >& geom = ielem->GetGeometry();

                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                    //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                //ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector&  element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();
                        const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            //double sq_dist = 0; //these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
                            //for (int k=0 ; k!=(TDim); k++)  sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
                            //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );

                            // squared shape function as projection weight
                            double weight=N(j)*N(j);
                            //weight=N(j)*N(j)*N(j);
                            if (weight<threshold) weight=1e-10;
                            if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
                            else
                            {
                                nodes_addedweights[j]+= weight;
                                //nodes_addedtemp[j] += weight * particle_temp;
                                nodes_added_scalar1[j] += weight*particle_scalar1;
                            }
                        }
                    }
                }

                // scatter the element-local sums into the shared nodal variables;
                // node locks guard against concurrent writes from other elements
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
                    geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        // normalize: divide the accumulated projection by the weight sum
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
                    double & height = inode->FastGetSolutionStepValue(mProjectionVar);
                    height /=sum_weights; //resetting the density
                }
                else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    // no particle contributed: fall back to the previous-step unknown
                    inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1);
                }
            }
        }

        KRATOS_CATCH("")
    }

    // Projects the particle scalar onto the mesh nodes (semi-implicit version):
    // solves a small consistent-mass system per element (plus a small lumped-mass
    // contribution) instead of the plain weighted average used by the explicit one.
    void TransferLagrangianToEulerianImp() //semi implicit
    {
        KRATOS_TRY

        // ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        std::cout << "projecting info to mesh (semi implicit)" << std::endl;

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //KRATOS_WATCH(offset)
        //(flag managed only by MoveParticles

        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // zero the nodal accumulators (projection value and weight sum)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            //creating a matrix for each of the problems.
            BoundedMatrix<double, TDim+1 , TDim+1  > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
            array_1d<double,(TDim+1)> rhs_scalar1;

            array_1d<double,3*(TDim+1)> nodes_positions;
            array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
            array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));

            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                nodes_added_scalar1 = ZeroVector((TDim+1));       //resetting vectors
                nodes_addedweights = ZeroVector((TDim+1));        //resetting vectors
                mass_matrix = ZeroMatrix(TDim+1 , TDim+1 );       //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
                //mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 );  //resetting matrices
                //mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 );  //resetting matrices
                //mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 );  //resetting matrices
                rhs_scalar1 = ZeroVector((TDim+1));               //resetting vectors

                Geometry<Node<3> >& geom = ielem->GetGeometry();
                const double elem_volume = geom.Area();

                for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                //ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector&  element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();
                        const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            double weight=N(j);
                            for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
                                mass_matrix(j,k) += weight*N(k);

                            rhs_scalar1[j] += weight * double(particle_scalar1);

                            //adding also a part with the lumped mass matrix to reduce overshoots and undershoots
                            if(true)
                            {
                                double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
                                nodes_addedweights[j]+= this_particle_weight;
                                nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
                            }
                        }
                    }
                }

                //now we invert the matrix
                BoundedMatrix<double, TDim+1 , TDim+1  > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
                if(TDim==3)
                    InvertMatrix( mass_matrix,  inverse_mass_matrix);
                else
                    InvertMatrix3x3( mass_matrix,  inverse_mass_matrix);

                //and now compute the elemental contribution to the gobal system:
                if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
                {
                    for (int i=0 ; i!=(TDim+1); i++)
                    {
                        for (int j=0 ; j!=(TDim+1); j++)
                        {
                            nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
                        }
                    }
                    //and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
                    for (int i=0 ; i!=(TDim+1); i++)
                        nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
                }

                // scatter under per-node locks: nodes are shared between elements/threads
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
                    geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        // normalize the nodal projection by the accumulated weights
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar);
                    scalar1 /=sum_weights; //resetting the density
                }
                else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    // no particle contributed: fall back to the previous-step unknown
                    inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1);
                }
            }
        }

        KRATOS_CATCH("")
    }

    // Updates every active particle in place (no convection) by interpolating the
    // nodal DELTA_SCALAR1 correction at the particle location; see
    // CorrectParticleUsingDeltaVariables for the per-particle work.
    void CorrectParticlesWithoutMovingUsingDeltaVariables()
    {
        KRATOS_TRY
        //std::cout << "updating particles" << std::endl;
        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles
        //KRATOS_WATCH(offset)

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                Element::Pointer pelement(*ielem.base());
                Geometry<Node<3> >& geom = ielem->GetGeometry();

                //ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
                //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector&  element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];
                //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    //KRATOS_WATCH(iii)
                    if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                    bool erase_flag= pparticle.GetEraseFlag();
                    if (erase_flag==false)
                    {
                        CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    // Appends `candidate` to `v` only if no element with the same Id is already
    // present (linear search by Id).
    template< class TDataType > void  AddUniqueWeakPointer
    (GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
    {
        typename GlobalPointersVector< TDataType >::iterator i = v.begin();
        typename GlobalPointersVector< TDataType >::iterator endit = v.end();
        while ( i != endit && (i)->Id() != (candidate)->Id())
        {
            i++;
        }
        if( i == endit )
        {
            v.push_back(candidate);
        }
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    // Refills under-populated elements BEFORE convection: seeds new particles at
    // element Gauss points and integrates them BACKWARDS along the velocity field
    // (MoveParticle_inverse_way) to pick up their scalar value from the mesh.
    void PreReseed(int minimum_number_of_particles)
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        const int offset =moffset;
        const int max_results = 1000;

        //tools for the paralelization
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        vector<unsigned int> elem_partition;
        int number_of_rows=mr_model_part.Elements().size();
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows;
        //KRATOS_WATCH(elem_partition_size);
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        #pragma omp parallel firstprivate(elem_partition)
        {
ResultContainerType results(max_results);
            int k = OpenMPUtils::ThisThread();
            //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
            //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
            //ModelPart::NodesContainerType local_list=aux[k];
            //PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k];
            //KRATOS_WATCH(k);
            BoundedMatrix<double, (TDim+1), 3 > pos;
            BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
            // index of the next candidate slot in the global particle array;
            // thread-local cursor, slots are claimed inside the critical section
            unsigned int freeparticle=0; //we start with the first position in the particles array

            //int local_id=1;
            for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                results.resize(max_results);
                //const int & elem_id = ielem->Id();
                //ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
                //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector&  element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];

                if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
                {
                    //KRATOS_WATCH("elem with little particles")
                    Geometry< Node<3> >& geom = ielem->GetGeometry();
                    ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                    //double conductivity = ielem->GetProperties()[CONDUCTIVITY];
                    //KRATOS_WATCH(conductivity);
                    for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
                    {
                        // find a free (erased) slot in the global particle array;
                        // double-checked inside "omp critical" so two threads cannot
                        // claim the same slot
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mparticles_vector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                            {
                                freeparticle++;
                            }
                        }

                        Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));

                        array_1d<double,TDim+1>aux2_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                        if (is_found==false)
                        {
                            KRATOS_WATCH(aux2_N);
                        }

                        pparticle.GetEraseFlag()=false;

                        ResultIteratorType result_begin = results.begin();
                        Element::Pointer pelement( *ielem.base() );
                        // integrate backwards along the velocity field so the new
                        // particle picks up the mesh value at its departure point
                        MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);

                        //and we copy it to the array:
                        mparticles_vector[freeparticle] =  pparticle;

                        element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                        pparticle.GetEraseFlag()=false;

                        number_of_particles_in_elem++;
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    // Refills under-populated elements AFTER convection: seeds 3+2*TDim particles
    // at post-reseed Gauss points and initializes their scalar directly from the
    // current nodal solution (no backward integration).
    void PostReseed(int minimum_number_of_particles) //pooyan's way
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        const int offset = moffset;

        //TOOLS FOR THE PARALELIZATION
        //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        //KRATOS_WATCH(number_of_threads);
        vector<unsigned int> elem_partition;
        int number_of_rows=mr_model_part.Elements().size();
        //KRATOS_WATCH(number_of_threads);
        //KRATOS_THROW_ERROR(std::logic_error, "Add  ----NODAL_H---- variable!!!!!! ERROR", "");
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows;
        //KRATOS_WATCH(elem_partition_size);
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

        //typedef Node < 3 > PointType;
        //std::vector<ModelPart::NodesContainerType> aux;// aux;
        //aux.resize(number_of_threads);

        //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
        //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
        {
            unsigned int reused_particles=0;

            // thread-local cursor into the global particle array (see PreReseed)
            unsigned int freeparticle = 0; //we start by the first position;

            int k = OpenMPUtils::ThisThread();
            //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
            //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;

            BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
            BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;

            double mesh_scalar1;

            array_1d<int, (3+2*TDim) > positions;

            unsigned int number_of_reseeded_particles;
            //unsigned int number_of_water_reseeded_particles;

            //array_1d<double, 3 > nodes_distances;

            for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                //ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector&  element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];

                Geometry< Node<3> >& geom = ielem->GetGeometry();
                if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
                {
                    //bool reseed_more=false;
                    number_of_reseeded_particles=0;

                    //reseed_more=true;
                    number_of_reseeded_particles= 3+2*TDim;
                    ComputeGaussPointPositionsForPostReseed(geom, pos, N);

                    for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                    {
                        //now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                            {
                                // double-checked claim of the slot (see PreReseed)
                                #pragma omp critical
                                {
                                    if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mparticles_vector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                            {
                                freeparticle++;
                            }
                        }

                        Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));

                        array_1d<double,TDim+1>aux_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                        if (is_found==false)
                        {
                            KRATOS_WATCH(aux_N);
                            KRATOS_WATCH(j)
                            KRATOS_WATCH(ielem->Id())
                        }

                        // interpolate the nodal unknown at the seeding point
                        mesh_scalar1 = 0.0;
                        for (unsigned int l = 0; l < (TDim+1); l++)
                        {
                            mesh_scalar1 +=  N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
                        }
                        pparticle.GetScalar1()=mesh_scalar1;
                        pparticle.GetEraseFlag()=false;

                        mparticles_vector[freeparticle]=pparticle;
                        element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                        number_of_particles_in_elem++;

                        if (keep_looking)
                        {
                            KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
                        }
                        else
                        {
                            reused_particles++;
                        }
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    // Mirrors a filtered subset of the particles into an auxiliary (lagrangian)
    // model part as printable nodes: one node per `input_filter_factor` particles,
    // carrying the particle scalar (mUnknownVar) and position (DISPLACEMENT).
    // First call initializes the node pool; inactive slots are parked at (-10,-10,-10).
    void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
    {
        KRATOS_TRY
        //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list

        if(mparticle_printing_tool_initialized==false)
        {
            mfilter_factor=input_filter_factor;

            if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0)
                KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");

            lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
            lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar);

            for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
            {
                Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0);  //remember this is the new model part!!
                //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
                pnode->SetBufferSize(1);
            }
            mparticle_printing_tool_initialized=true;
        }

        //resetting data of the unused particles
        const double inactive_particle_position= -10.0;
        array_1d<double,3>inactive_particle_position_vector;
        inactive_particle_position_vector(0)=inactive_particle_position;
        inactive_particle_position_vector(1)=inactive_particle_position;
        inactive_particle_position_vector(2)=inactive_particle_position;
        ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
        for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
            inode->FastGetSolutionStepValue(mUnknownVar) = 0.0;
            inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
        }

        int counter=0;
        //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin();
        for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
        {
            Convection_Particle& pparticle =mparticles_vector[i];
            if(pparticle.GetEraseFlag()==false &&
i%mfilter_factor==0)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+counter;
                //copying info from the particle to the (printing) node.
                inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1();
                inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
                counter++;
            }
        }
        KRATOS_CATCH("")
    }

protected:

private:

    ///this function moves a particle according to the "velocity" given
    ///by "rVariable". The movement is performed in nsubsteps, during a total time
    ///of Dt
    // On exit: pparticle.Coordinates() holds the final position, pelement points to
    // the element containing it; the erase flag is set if the particle left the mesh.
    // elements_in_trajectory / number_of_elements_in_trajectory cache the crossed
    // elements so the next search can start from them.
    void MoveParticle(   Convection_Particle & pparticle,
                         Element::Pointer & pelement,
                         GlobalPointersVector< Element >& elements_in_trajectory,
                         unsigned int & number_of_elements_in_trajectory,
                         ResultIteratorType result_begin,
                         const unsigned int MaxNumberOfResults)
    {
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];
        unsigned int nsubsteps;
        double substep_dt;

        bool KEEP_INTEGRATING=false;
        bool is_found;
        //bool have_air_node;
        //bool have_water_node;

        array_1d<double,3> vel;
        array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
        array_1d<double,3> position;
        array_1d<double,3> mid_position;
        array_1d<double,TDim+1> N;

        //we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { KEEP_INTEGRATING=true; Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel=ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j]; } //calculating substep to get +- courant(substep) = 0.1 nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE)); if (nsubsteps<1) nsubsteps=1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0;// weight;//*double(nsubsteps); position += vel*substep_dt;//weight; //DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY ////////////////////////////////////////////////////////////////////////////////////////////////////// unsigned int check_from_element_number=0; for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle. 
{ if (KEEP_INTEGRATING==true) { is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j]; } only_integral += 1.0; //values saved for the current time step position+=vel*substep_dt;//weight; } else { KEEP_INTEGRATING=false; break; } } else break; } } if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true); else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement) if (is_found==false) ( pparticle.GetEraseFlag()=true); pparticle.Coordinates() = position; } void CorrectParticleUsingDeltaVariables( Convection_Particle & pparticle, Element::Pointer & pelement, Geometry< Node<3> >& geom) { array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. array_1d<double,3> coords = pparticle.Coordinates(); float & particle_scalar1 = pparticle.GetScalar1(); //double distance=0.0; double delta_scalar1 = 0.0; bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_WATCH(N) for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 ) N[j]=1e-10; } for(unsigned int j=0; j<(TDim+1); j++) { delta_scalar1 += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j]; } particle_scalar1 = particle_scalar1 + delta_scalar1; } void MoveParticle_inverse_way( Convection_Particle & pparticle, Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! 
                              ResultIteratorType result_begin,
                              const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;

    bool KEEP_INTEGRATING=false;
    bool is_found;

    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;

    double scalar1 = 0.0;

    //we start with the first position, then it will enter the loop.
    position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates

    double only_integral = 0.0 ;

    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
    //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        scalar1=0.0;

        //interpolate both the transported scalar and the velocity at the particle position
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
            noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
        }

        //calculating substep to get +- courant(substep) = 1/4
        //NOTE(review): the factor 10.0 matches the forward MoveParticle (courant ~0.1);
        //the "1/4" in this comment looks stale — confirm.
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);

        //backwards tracing: subtract the velocity contribution
        position -= vel*substep_dt;//weight;

        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true)
            {
                is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
                //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in

                    vel=ZeroVector(3);
                    scalar1=0.0;

                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
                        scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
                    }

                    only_integral += 1.0;//weight ;
                    //values saved for the current time step
                    position-=vel*substep_dt;//weight;
                }
                else
                    KEEP_INTEGRATING=false;
            }
        }

        //store the scalar interpolated at the last successfully located position
        pparticle.GetScalar1()=scalar1;
    }
    //else {KRATOS_WATCH(position); }
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is returned the element is not found
///Search order: (1) the element pelement currently points to, (2) its
///NEIGHBOUR_ELEMENTS, (3) the spatial bins. pelement and N are updated on success.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true) //that was easy!
    {
        return true;
    }

    //to begin with we check the neighbour elements; it is a bit more expensive
    GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */

    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=neighb_elems(i)->shared_from_this();
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();

            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);

            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                return true;
            }
        }
    }

    //if nothing worked, then:
    //not found case
    return false;
}

// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
/// Same search as FindNodeOnMesh above, but first re-checks the elements cached in
/// elements_in_trajectory (from index check_from_element_number onwards) before
/// falling back to neighbours and the bins. Newly found elements are appended to the
/// cache by the L606+ part of this function.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     GlobalPointersVector< Element >& elements_in_trajectory,
                     unsigned int & number_of_elements_in_trajectory,
                     unsigned int & check_from_element_number,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true)
    {
        return true; //that was easy!
    }

    //if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
    for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
    {
        Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
        if (is_found_2)
        {
            pelement=elements_in_trajectory(i)->shared_from_this();
            N=aux_N;
            check_from_element_number = i+1 ; //now i element matches pelement, so to avoid checking twice the same element we send the counter to the following element.
            return true;
        }
    }

    //now we check the neighbour elements:
    auto& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */

    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=neighb_elems(i)->shared_from_this();
            //cache the element (up to a hard cap of 20) for the next particles of this seed
            if (number_of_elements_in_trajectory<20)
            {
                elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                number_of_elements_in_trajectory++;
                check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();

            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);

            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                //cache the element (up to a hard cap of 20) for the next particles of this seed
                if (number_of_elements_in_trajectory<20)
                {
                    elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                    number_of_elements_in_trajectory++;
                    check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
    }

    //not found case
    return false;
}

//***************************************
//***************************************

/// 2D point-in-triangle test. Fills N with the barycentric (area) coordinates of
/// (xc,yc) with respect to the triangle geom; zc is unused in 2D. Returns true when
/// all coordinates are in [0,1], i.e. the point lies inside the triangle.
/// Throws if the triangle has zero area.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    //barycentric coordinates = sub-triangle area / total area
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

////////////
//using the pre loaded nodal coordinates
/// Same 2D test as above, but reading the triangle vertices from a flat coordinate
/// array laid out as [x0,y0,z0, x1,y1,z1, x2,y2,z2].
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

//***************************************
//***************************************

/// 3D point-in-tetrahedron test. Fills N with the volume (barycentric) coordinates
/// of (xc,yc,zc) with respect to the tetrahedron geom; returns true when all four
/// are in [0,1].
/// NOTE(review): the threshold below also rejects tetrahedra with NEGATIVE volume
/// (inverted connectivity), not only degenerate ones — confirm that is intended.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    double inv_vol = 0.0;
    if (vol < 0.000000000000000000000000000001)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    //barycentric coordinates = sub-tetrahedron volume / total volume
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

///////////////////
//using the pre loaded nodal coordinates
/// Same 3D test as above, but reading the tetrahedron vertices from a flat
/// coordinate array laid out as [x0,y0,z0, ..., x3,y3,z3].
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& z0 = nodes_positions[2];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& z1 = nodes_positions[5];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];
    const double& z2 = nodes_positions[8];
    const double& x3 = nodes_positions[9];
    const double& y3 = nodes_positions[10];
    const double& z3 = nodes_positions[11];

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    double inv_vol = 0.0;
    if (vol < 0.000000000000000000000000000001)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

/// Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2): positive for
/// counter-clockwise vertex ordering.
inline double CalculateVol(const double x0, const double y0,
                           const double x1, const double y1,
                           const double x2, const double y2 )
{
    return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}

//***************************************
//***************************************

/// Signed volume of the tetrahedron with vertices 0..3: det(J)/6, where J is the
/// Jacobian of the edge vectors from vertex 0.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3 )
{
    double x10 = x1 - x0;
    double y10 = y1 - y0;
    double z10 = z1 - z0;

    double x20 = x2 - x0;
    double y20 = y2 - y0;
    double z20 = z2 - z0;

    double x30 = x3 - x0;
    double y30 = y3 - y0;
    double z30 = z3 - z0;

    double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666667;
}

/// Fills the first 4 rows of N with triangle shape-function values and pos with the
/// corresponding physical coordinates (three off-center points plus the centroid).
/// NOTE(review): the matrices are dimensioned 7x3 but only rows 0..3 are written —
/// callers must not read rows 4..6; confirm.
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
    double one_third = 1.0 / 3.0;
    double one_sixt = 0.15; //1.0 / 6.0;
    double two_third = 0.7; //2.0 * one_third;

    N(0, 0) = one_sixt;
    N(0, 1) = one_sixt;
    N(0, 2) = two_third;
    N(1, 0) = two_third;
    N(1, 1) = one_sixt;
    N(1, 2) = one_sixt;
    N(2, 0) = one_sixt;
    N(2, 1) = two_third;
    N(2, 2) = one_sixt;
    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;

    //first
    pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
    pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
    pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();

    //second
    pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
    pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
    pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();

    //third
    pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
    pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
    pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();

    //fourth
    pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
    pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
    pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}

/// 2D post-reseed sampling: fills all 7 rows of N with triangle shape-function
/// values (three corner-biased points, the centroid, and three edge-biased points)
/// and pos with the corresponding physical coordinates.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
    double one_third = 1.0 / 3.0;
    double one_eight = 0.12; //1.0 / 6.0;
    double three_quarters = 0.76; //2.0 * one_third;

    N(0, 0) = one_eight;
    N(0, 1) = one_eight;
    N(0, 2) = three_quarters;

    N(1, 0) = three_quarters;
    N(1, 1) = one_eight;
    N(1, 2) = one_eight;

    N(2, 0) = one_eight;
    N(2, 1) = three_quarters;
    N(2, 2) = one_eight;

    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;

    N(4, 0) = one_eight;
    N(4, 1) = 0.44;
    N(4, 2) = 0.44;

    N(5, 0) = 0.44;
    N(5, 1) = one_eight;
    N(5, 2) = 0.44;

    N(6, 0) = 0.44;
    N(6, 1) = 0.44;
    N(6, 2) = one_eight;

    //first
    pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
    pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
    pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();

    //second
    pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
    pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
    pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();

    //third
    pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
    pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
    pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();

    //fourth
    pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
    pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
    pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();

    //fifth
    pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
    pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
    pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();

    //sixth
    pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
    pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
    pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();

    //seventh
    pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
    pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
    pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}

/// 3D post-reseed sampling: fills the 9 rows of N with tetrahedron shape-function
/// values (four corner-biased points, the centroid, and four face-biased points),
/// then accumulates pos = N * nodal coordinates.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
    double one_quarter = 0.25;
    double small_fraction = 0.1; //1.0 / 6.0;
    double big_fraction = 0.7; //2.0 * one_third;
    double mid_fraction = 0.3; //2.0 * one_third;

    N(0, 0) = big_fraction;
    N(0, 1) = small_fraction;
    N(0, 2) = small_fraction;
    N(0, 3) = small_fraction;

    N(1, 0) = small_fraction;
    N(1, 1) = big_fraction;
    N(1, 2) = small_fraction;
    N(1, 3) = small_fraction;

    N(2, 0) = small_fraction;
    N(2, 1) = small_fraction;
    N(2, 2) = big_fraction;
    N(2, 3) = small_fraction;

    N(3, 0) = small_fraction;
    N(3, 1) = small_fraction;
    N(3, 2) = small_fraction;
    N(3, 3) = big_fraction;

    N(4, 0) = one_quarter;
    N(4, 1) = one_quarter;
    N(4, 2) = one_quarter;
    N(4, 3) = one_quarter;

    N(5, 0) = small_fraction;
    N(5, 1) = mid_fraction;
    N(5, 2) = mid_fraction;
    N(5, 3) = mid_fraction;

    N(6, 0) = mid_fraction;
    N(6, 1) = small_fraction;
    N(6, 2) = mid_fraction;
    N(6, 3) = mid_fraction;

    N(7, 0) = mid_fraction;
    N(7, 1) = mid_fraction;
    N(7, 2) = small_fraction;
    N(7, 3) = mid_fraction;

    N(8, 0) = mid_fraction;
    N(8, 1) = mid_fraction;
    N(8, 2) = mid_fraction;
    N(8, 3) = small_fraction;

    pos=ZeroMatrix(9,3);
    for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j=0; j!=9; j++) //going through the 9 particles
        {
            for (unsigned int k=0; k!=3; k++) //x,y,z
                pos(j,k) += N(j,i) * coordinates[k];
        }
    }
}

/// 2D pre-reseed sampling: three points, each biased toward one triangle node
/// (weights 0.5/0.25/0.25).
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
    N(0, 0) = 0.5;
    N(0, 1) = 0.25;
    N(0, 2) = 0.25;

    N(1, 0) = 0.25;
    N(1, 1) = 0.5;
    N(1, 2) = 0.25;

    N(2, 0) = 0.25;
    N(2, 1) = 0.25;
    N(2, 2) = 0.5;

    //first
    pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X();
    pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y();
    pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z();

    //second
    pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X();
    pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y();
    pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z();

    //third
    pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X();
    pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y();
    pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z();
}

/// 3D pre-reseed sampling: four points, each biased toward one tetrahedron node
/// (weights 0.4/0.2/0.2/0.2); pos = N * nodal coordinates.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
    //creating 4 particles, each will be closer to a node and equidistant to the other nodes
    N(0, 0) = 0.4;
    N(0, 1) = 0.2;
    N(0, 2) = 0.2;
    N(0, 3) = 0.2;

    N(1, 0) = 0.2;
    N(1, 1) = 0.4;
    N(1, 2) = 0.2;
    N(1, 3) = 0.2;

    N(2, 0) = 0.2;
    N(2, 1) = 0.2;
    N(2, 2) = 0.4;
    N(2, 3) = 0.2;

    N(3, 0) = 0.2;
    N(3, 1) = 0.2;
    N(3, 2) = 0.2;
    N(3, 3) = 0.4;

    pos=ZeroMatrix(4,3);
    for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j=0; j!=4; j++) //going through the 4 particles
        {
            for (unsigned int k=0; k!=3; k++) //x,y,z
                pos(j,k) += N(j,i) * coordinates[k];
        }
    }
}

/// Dense triangular lattice of 45 sampling points over the triangle (9-row
/// triangular arrangement with 0.1 spacing in the first two shape functions).
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
    //std::cout << "NEW ELEMENT" << std::endl;
    unsigned int counter=0;
    for (unsigned int i=0; i!=9;i++)
    {
        for (unsigned int j=0; j!=(9-i);j++)
        {
            N(counter,0)=0.05+double(i)*0.1;
            N(counter,1)=0.05+double(j)*0.1;
            N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
            pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
            pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
            pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
            //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
            counter++;
        }
    }
}

/// Initial 2D particle seeding: triangular lattice of 15 points (5-row triangle
/// with 0.2 spacing in the first two shape functions).
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom,
                                        BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
    //std::cout << "NEW ELEMENT" << std::endl;
    unsigned int counter=0;
    for (unsigned int i=0; i!=5;i++)
    {
        for (unsigned int j=0; j!=(5-i);j++)
        {
            N(counter,0)=0.05+double(i)*0.2;
            N(counter,1)=0.05+double(j)*0.2;
            N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
            pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
            pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
            pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
            //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
            counter++;
        }
    }
}

/// Initial 3D particle seeding: 20 points built as a layered tetrahedral lattice
/// (layers of shrinking triangles in shape-function space).
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom,
                                        BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
    //std::cout << "NEW ELEMENT" << std::endl;
    //double total;
    double fraction_increment;
    unsigned int counter=0;
    for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
    {
        //std::cout << "inside i" << i << std::endl;
        for (unsigned int j=0; j!=(4-i);j++)
        {
            //std::cout << "inside j" << j << std::endl;
            for (unsigned int k=0; k!=(4-i-j);k++)
            {
                //std::cout << "inside k" << k << std::endl;
                N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
                //total = 1.0 - N(counter,0);
                fraction_increment = 0.27; //
                N(counter,1)=fraction_increment * (0.175 + double(j));
                N(counter,2)=fraction_increment * (0.175 + double(k));
                N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
                pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
                pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
                pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
                //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
                counter++;
            }
        }
    }
}

/// General matrix inversion via Boost.uBLAS LU factorization.
/// Returns false (leaving `inverse` untouched) when the factorization fails,
/// i.e. the matrix is singular.
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
    typedef permutation_matrix<std::size_t> pmatrix;

    // create a working copy of the input
    T A(input);

    // create a permutation matrix for the LU-factorization
    pmatrix pm(A.size1());

    // perform LU-factorization
    int res = lu_factorize(A, pm);
    if (res != 0)
        return false;

    // create identity matrix of "inverse"
    inverse.assign(identity_matrix<double> (A.size1()));

    // backsubstitute to get the inverse
    lu_substitute(A, pm, inverse);

    return true;
}

/// Direct 3x3 inversion using the cofactor (adjugate) formula; always returns true.
/// NOTE(review): there is no singularity check — a zero determinant produces a
/// division by zero (inf/nan entries). Consider returning false when
/// fabs(determinant) is below a tolerance, mirroring InvertMatrix above.
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
    double determinant =    +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
                            -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
                            +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
    double invdet = 1/determinant;
    result(0,0) =  (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
    result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
    result(2,0) =  (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
    result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
    result(1,1) =  (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
    result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
    result(0,2) =  (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
    result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
    result(2,2) =  (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;

    return true;
}

/// Validates that the model part carries CONVECTION_DIFFUSION_SETTINGS and that the
/// variables this utility reads/writes (unknown, projection, velocity, MEAN_SIZE,
/// DELTA_SCALAR1) are present in the nodal database; throws on any missing item.
virtual int Check()
{
    KRATOS_TRY

    ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo();
    if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false)
        KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", "");
    //std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl;

    ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);

    //UNKNOWN VARIABLE
    if(my_settings->IsDefinedUnknownVariable()==true)
    {
        if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false)
            KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", "");
    }
    else
        KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", "");

    //PROJECTION VARIABLE
    //used as intermediate variable, is the variable at time n+1 but only accounting for the convective term.
    if(my_settings->IsDefinedProjectionVariable()==true)
    {
        if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false)
            KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", "");
    }
    else
        KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", "");

    //CONVECTION VELOCITY VARIABLE
    //CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used:
    //if(my_settings->IsDefinedConvectionVariable()==true)
    //{
    //	if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false)
    //		KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", "");
    //}
    //else
    //	std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl;

    if(my_settings->IsDefinedConvectionVariable()==true)
        KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. Use VelocityVariable instead", "");

    //VELOCITY VARIABLE
    if(my_settings->IsDefinedVelocityVariable()==true)
    {
        if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false)
            KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", "");
    }
    else
        KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", "");

    if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", "");

    if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", "");

    return 0;

    KRATOS_CATCH("")
}

ModelPart& mr_model_part;                  // the fluid/transport model part this utility operates on
int m_nparticles;                          // target number of particles
int mnelems;                               // number of elements in the model part
int moffset;                               // buffer offset toggled between steps (see modd_timestep)
//vector<double> mareas_vector; UNUSED SO COMMENTED
int max_nsubsteps;                         // upper bound on substeps per particle move
double max_substep_dt;                     // upper bound on the substep time increment
int mmaximum_number_of_particles;          // capacity cap per element
std::vector< Convection_Particle > mparticles_vector; //Point<3>
int mlast_elem_id;                         // highest element id seen (used when creating new entities)
bool modd_timestep;                        // parity flag: selects which half of the particle-pointer buffers is active
bool mparticle_printing_tool_initialized;  // true once the auxiliary printing nodes have been created
unsigned int mfilter_factor;               // print every mfilter_factor-th particle
unsigned int mlast_node_id;                // highest node id (printing nodes are appended after this)
//ModelPart& mr_particle_model_part;

vector<int> mnumber_of_particles_in_elems;     // per-element particle counters
vector<int> mnumber_of_particles_in_elems_aux; // scratch copy of the counters
//vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element
vector<ParticlePointerVector> mvector_of_particle_pointers_vectors; // per-element particle pointer storage

typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; // spatial search structure used by FindNodeOnMesh

// variables resolved once from CONVECTION_DIFFUSION_SETTINGS (see Check())
const Variable<double>& mUnknownVar;
const Variable<double>& mProjectionVar;
const Variable<array_1d<double,3> >& mVelocityVar;
const Variable<array_1d<double,3> >& mMeshVelocityVar;

};

} // namespace Kratos.

#endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
openmp.c
/* * Copyright (c) 2003, 2007-11 Matteo Frigo * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ /* openmp.c: thread spawning via OpenMP */ #include "threads.h" #if !defined(_OPENMP) #error OpenMP enabled but not using an OpenMP compiler #endif int X(ithreads_init)(void) { return 0; /* no error */ } /* Distribute a loop from 0 to loopmax-1 over nthreads threads. proc(d) is called to execute a block of iterations from d->min to d->max-1. d->thr_num indicate the number of the thread that is executing proc (from 0 to nthreads-1), and d->data is the same as the data parameter passed to X(spawn_loop). This function returns only after all the threads have completed. */ void X(spawn_loop)(int loopmax, int nthr, spawn_function proc, void *data) { int block_size; spawn_data d; int i; A(loopmax >= 0); A(nthr > 0); A(proc); if (!loopmax) return; /* Choose the block size and number of threads in order to (1) minimize the critical path and (2) use the fewest threads that achieve the same critical path (to minimize overhead). e.g. if loopmax is 5 and nthr is 4, we should use only 3 threads with block sizes of 2, 2, and 1. 
*/ block_size = (loopmax + nthr - 1) / nthr; nthr = (loopmax + block_size - 1) / block_size; THREAD_ON; /* prevent debugging mode from failing under threads */ #pragma omp parallel for private(d) for (i = 0; i < nthr; ++i) { d.max = (d.min = i * block_size) + block_size; if (d.max > loopmax) d.max = loopmax; d.thr_num = i; d.data = data; proc(&d); } THREAD_OFF; /* prevent debugging mode from failing under threads */ } void X(threads_cleanup)(void) { }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__lxor_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): documentation-only pass; all code below is left byte-identical
// because this file is machine-generated.  Each function is a thin wrapper
// that sets the GB_* macros and then pulls in a shared template via #include.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__lxor_int32)
// A.*B function (eWiseMult):     GB (_AemultB_08__lxor_int32)
// A.*B function (eWiseMult):     GB (_AemultB_02__lxor_int32)
// A.*B function (eWiseMult):     GB (_AemultB_04__lxor_int32)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__lxor_int32)
// A*D function (colscale):       GB (_AxD__lxor_int32)
// D*A function (rowscale):       GB (_DxB__lxor_int32)
// C+=B function (dense accum):   GB (_Cdense_accumB__lxor_int32)
// C+=b function (dense accum):   GB (_Cdense_accumb__lxor_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int32)
// C=scalar+B                     GB (_bind1st__lxor_int32)
// C=scalar+B'                    GB (_bind1st_tran__lxor_int32)
// C=A+scalar                     GB (_bind2nd__lxor_int32)
// C=A'+scalar                    GB (_bind2nd_tran__lxor_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = ((aij != 0) != (bij != 0))
// (logical xor of the two values interpreted as booleans)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT32 || GxB_NO_LXOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (lxor is not in that list, so this variant is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned
    // (an artifact of the code generator, kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (fill values for A and B)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for lxor, so this is the branch compiled in.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) { for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(24*t3+Nx+20,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),64*t4+62),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 
1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
pr27388-3.c
/* PR middle-end/27388 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */

/* GCC DejaGnu regression test for PR middle-end/27388.
   The dg-* directive comments above and the dg-final scan below drive the
   test harness and must not be altered: the final scan expects the
   "omplower" tree dump to contain exactly two "omp for ... private"
   occurrences — one per worksharing loop below. */

extern void bar (int);

void
foo (void)
{
  /* i is firstprivate and j private on the enclosing parallel region;
     both are then reused as iteration variables of `omp for` loops. */
  int i = 0, j = 0;
#pragma omp parallel firstprivate (i) private (j)
  {
#pragma omp for
    for (i = 0; i < 2; i++)
      bar (i);
#pragma omp for
    for (j = 0; j < 2; j++)
      bar (j);
  }
}

/* { dg-final { scan-tree-dump-times "omp for\[^\\n\]*private" 2 "omplower" } } */
SumaVectoresC-Ej7.c
/* SumaVectoresC.c
   Sum of two vectors: v3 = v1 + v2
   Build with (-lrt: real time library):
     gcc -O2 SumaVectores.c -o SumaVectores -lrt
     gcc -O2 -S SumaVectores.c -lrt   // to generate the assembly listing
   Run with:
     SumaVectoresC length
*/
#include <stdlib.h>  // atoi(), malloc(), free()
#include <stdio.h>   // printf()
#include <time.h>    // clock() (timing fallback without OpenMP)

#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
/* BUG FIX: main() calls omp_get_wtime() unconditionally, so a build without
   OpenMP did not compile; provide a clock()-based fallback. */
#define omp_get_wtime() ((double) clock() / CLOCKS_PER_SEC)
#endif

//#define PRINTF_ALL  // uncomment to print every vector component

/* Exactly one of the three VECTOR_ storage-class constants may be defined
   (only one of the following three defines may be uncommented): */
//#define VECTOR_LOCAL    // vectors as local variables (C99 VLAs); exceeding
                          // the stack size yields a segmentation fault
#define VECTOR_GLOBAL     // vectors as global variables; length not limited
                          // by the program's stack size
//#define VECTOR_DYNAMIC  // vectors as heap allocations, reusable at runtime

#ifdef VECTOR_GLOBAL
#define MAX 33554432  //=2^25
//#define MAX 4294967295 //=(2^32) -1
double v1[MAX], v2[MAX], v3[MAX];
#endif

/* Initializes v1 and v2, sums them into v3 inside one OpenMP parallel
   region, and prints the elapsed time of the summation loop. */
int main(int argc, char** argv)
{
  int i;
  double cgt1, cgt2;  // wall-clock stamps taken around the summation loop
  double ncgt;        // elapsed execution time

  // read the vector length from the command line
  if (argc < 2) {
    printf("Faltan no componentes del vector\n");
    exit(-1);
  }
  unsigned int N = atoi(argv[1]); // max N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B)
  // NOTE(review): atoi() of a negative argument wraps to a huge unsigned
  // value here; consider strtoul() if untrusted input matters.

#ifdef VECTOR_LOCAL
  double v1[N], v2[N], v3[N]; // runtime-sized locals (C99 VLAs)
#endif
#ifdef VECTOR_GLOBAL
  if (N > MAX) N = MAX;   // clamp to the static array size
#endif
#ifdef VECTOR_DYNAMIC
  double *v1, *v2, *v3;
  v1 = (double*) malloc(N*sizeof(double)); // malloc takes the size in bytes
  v2 = (double*) malloc(N*sizeof(double)); // malloc returns NULL on failure
  v3 = (double*) malloc(N*sizeof(double));
  if ((v1==NULL) || (v2==NULL) || (v3==NULL)) {
    printf("Error en la reserva de espacio para los vectores\n");
    exit(-2);
  }
#endif

  // initialize, time, and sum inside a single parallel region; the loop
  // variable of each `omp for` is implicitly private, and `omp single`
  // (with its implicit barrier) brackets the timed section
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < N; i++) {
      v1[i] = N*0.1 + i*0.1;
      v2[i] = N*0.1 - i*0.1;  // the values depend on N
    }
#pragma omp single
    {
      cgt1 = omp_get_wtime();
    }
    // compute the vector sum
#pragma omp for
    for (i = 0; i < N; i++)
      v3[i] = v1[i] + v2[i];
#pragma omp single
    {
      cgt2 = omp_get_wtime();
    }
  }
  ncgt = cgt2 - cgt1;  // elapsed time of the summation loop

  // print the result of the sum and the execution time
#ifdef PRINTF_ALL
  printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n", ncgt, N);
  for (i = 0; i < N; i++)
    printf("/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
           i, i, i, v1[i], v2[i], v3[i]);
#else
  printf("Tiempo(seg.):%11.9f\n/ Tamaño Vectores:%u\n/ V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) / \n/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
         ncgt, N, v1[0], v2[0], v3[0], N-1, N-1, N-1, v1[N-1], v2[N-1], v3[N-1]);
#endif

#ifdef VECTOR_DYNAMIC
  free(v1); // release the space reserved for v1
  free(v2); // release the space reserved for v2
  free(v3); // release the space reserved for v3
#endif
  return 0;
}
depth_to_space.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_DEPTH_TO_SPACE_H_ #define MACE_KERNELS_DEPTH_TO_SPACE_H_ #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/kernel.h" #include "mace/public/mace.h" #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { namespace kernels { template<DeviceType D, typename T> struct DepthToSpaceOpFunctor : OpKernel { DepthToSpaceOpFunctor(OpKernelContext *context, const int block_size) : OpKernel(context), block_size_(block_size) {} MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); const index_t batch_size = input->dim(0); const index_t input_depth = input->dim(1); const index_t input_height = input->dim(2); const index_t input_width = input->dim(3); MACE_CHECK(input_depth % (block_size_ * block_size_) == 0, "input depth should be dividable by block_size * block_size", input_depth); const index_t output_depth = input_depth / (block_size_ * block_size_); const index_t output_width = input_width * block_size_; const index_t output_height = input_height * block_size_; std::vector<index_t> output_shape = {batch_size, output_depth, output_height, output_width}; MACE_RETURN_IF_ERROR(output->Resize(output_shape)); Tensor::MappingGuard logits_guard(input); Tensor::MappingGuard 
output_guard(output); const T *input_ptr = input->data<T>(); T *output_ptr = output->mutable_data<T>(); #pragma omp parallel for for (index_t b = 0; b < batch_size; ++b) { for (index_t d = 0; d < output_depth; ++d) { for (index_t h = 0; h < output_height; ++h) { const index_t in_h = h / block_size_; const index_t offset_h = (h % block_size_); for (int w = 0; w < output_width; ++w) { const index_t in_w = w / block_size_; const index_t offset_w = w % block_size_; const index_t offset_d = (offset_h * block_size_ + offset_w) * output_depth; const index_t in_d = d + offset_d; const index_t o_index = ((b * output_depth + d) * output_height + h) * output_width + w; const index_t i_index = ((b * input_depth + in_d) * input_height + in_h) * input_width + in_w; output_ptr[o_index] = input_ptr[i_index]; } } } } return MACE_SUCCESS; } const int block_size_; }; #ifdef MACE_ENABLE_OPENCL class OpenCLDepthToSpaceKernel { public: virtual MaceStatus Compute( OpKernelContext *context, const Tensor *input, Tensor *output, StatsFuture *future) = 0; MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLDepthToSpaceKernel); }; template<typename T> struct DepthToSpaceOpFunctor<DeviceType::GPU, T> : OpKernel { DepthToSpaceOpFunctor(OpKernelContext *context, const int block_size); MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future); std::unique_ptr<OpenCLDepthToSpaceKernel> kernel_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_DEPTH_TO_SPACE_H_
testing_dgeqrf.c
/**
 *
 * @file example_dpotrf.c
 *
 * PLASMA testing routines
 * PLASMA is a software package provided by Univ. of Tennessee,
 * Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @brief Example of Cholesky factorization
 *        NOTE(review): the file header, @file name and several printf
 *        messages still say DPOTRF/Cholesky, but this file actually tests
 *        DGEQRF (QR factorization) — stale copy-paste from the Cholesky
 *        example.
 *
 * @version 2.6.0
 * @author Bilel Hadri
 * @date 2010-11-15
 * @generated d Tue Jan  7 11:45:20 2014
 *
 **/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>

#include <plasma.h>
#include <cblas.h>
#include <lapacke.h>
#include <core_blas.h>
#include "testing_dmain.h"

static int check_factorization(int, int, double*, double*, int);

/* Fill A (m x n, column-major, leading dimension m) with a random symmetric
   matrix, then add 10*m to the diagonal to make it diagonally dominant
   (and hence well conditioned for the factorization test). */
static void GENMAT_SYM_FULL(int m, int n, double *A)
{
    srand48(time(NULL)); /* non-deterministic seed: runs are not reproducible */
    int j;
    for (j = 0; j < m; ++j ) {
        int i;
        for( i = j; i < n; ++i ) {
            double dran = drand48();
            A[j*m+i] = A[i*m+j] = dran; /* mirror across the diagonal */
        }
    }
    for(j = 0; j < m; ++j)
        A[j*m+j] += 10 * m;
}

/* Run PLASMA_dgeqrf on a generated matrix and verify it against LAPACK.
   argc/argv are accepted for the testing harness but not read here.
   Returns 0 (even when the check fails — only a message is printed). */
int testing_dgeqrf(int argc, char **argv)
{
    int M = 1000;
    int N = 1000;
    int LDA = 1000;
    int info_factorization;

    double *A1 = (double *)malloc(LDA*N*sizeof(double));
    double *A2 = (double *)malloc(LDA*N*sizeof(double));
    /* NOTE(review): `omp register` is an OmpSs/extension pragma, not
       standard OpenMP — confirm the intended compiler supports it. */
#pragma omp register ([LDA*N]A2)

    /* Check if unable to allocate memory */
    if ((!A1)||(!A2)){
        printf("Out of Memory \n ");
        return EXIT_SUCCESS;
    }

    /* Initialize A1 and A2 for Symmetric Positive Matrix */
    GENMAT_SYM_FULL(LDA, N, A1);
    int i;
    for(i = 0; i < N*LDA; ++i){
        A2[i] = A1[i]; /* A2 gets the PLASMA factorization, A1 the LAPACK one */
    }

    /* Plasma routines */
    PLASMA_desc *T;
    PLASMA_Alloc_Workspace_dgels(M, N, &T);
    PLASMA_dgeqrf(M, N, A2, LDA, T);

    /* Check the factorization */
    info_factorization = check_factorization( M, N, A1, A2, LDA);

    if ( info_factorization != 0 )
        printf("-- Error in DPOTRF example ! \n");
    else
        printf("-- Run of DPOTRF example successful ! \n");

    free(A1);
    free(A2);

    return 0;
}

/* Compare the PLASMA QR result (in A2) against LAPACKE_dgeqrf run on A1
   by comparing infinity norms of the two factored matrices.
   Returns 0 when |Rnorm-Anorm|/Anorm <= 10 (factorization accepted),
   1 when the ratio is NaN or too large.
   NOTE(review): this is a coarse norm comparison, not a true residual
   check; `alpha`, `eps`, `i`, `j` are declared but unused. */
static int check_factorization(int M, int N, double *A1, double *A2, int LDA)
{
    double Anorm, Rnorm;
    double Anorm1, Rnorm1;
    double alpha;
    int info_factorization;
    int i,j;
    double eps;

    eps = LAPACKE_dlamch_work('e');

    // double *Residual = (double *)malloc(M*N*sizeof(double));
    // double *L1 = (double *)malloc(M*N*sizeof(double));
    // double *L2 = (double *)malloc(M*N*sizeof(double));
    double *work = (double *)malloc(N*sizeof(double));

    /* Reference factorization in place on A1 (work doubles as the tau array). */
    LAPACKE_dgeqrf(LAPACK_COL_MAJOR, M, N, A1, M, work);

    Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, A1, M, work);
    Anorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, A2, LDA, work);
    Rnorm1 = PLASMA_dlange(PlasmaInfNorm, M, N, A1, LDA);
    Anorm1 = PLASMA_dlange(PlasmaInfNorm, M, N, A2, LDA);
    printf("|Rnorm-Rnorm1|: %e, |Anorm-Anorm1|: %e\n", fabs(Rnorm-Rnorm1), fabs(Anorm-Anorm1));

    printf("============\n");
    printf("Checking the QR Factorization \n");
    /* NOTE(review): label below still says ||L'L-A|| from the Cholesky
       example; what is printed is |Rnorm-Anorm|/Anorm. */
    printf("-- ||L'L-A||_oo/(||A||_oo.N.eps) = %e \n",fabs(Rnorm-Anorm)/(Anorm));

    if ( isnan(fabs(Rnorm-Anorm)/(Anorm)) || (fabs(Rnorm-Anorm)/(Anorm) > 10.0) ){
        printf("-- Factorization is suspicious ! \n");
        info_factorization = 1;
    }
    else{
        printf("-- Factorization is CORRECT ! \n");
        info_factorization = 0;
    }

    // free(Residual); free(L1); free(L2);
    free(work);

    return info_factorization;
}
fmt-m.c
// driver routines for fmt funcs created by gen-fmt #include <stdlib.h> #include "fmt.h" #include "fmt-m.h" #ifdef USE_CUDA #include "cuda/cuda-fmt-drv.h" #endif int fmt_m_init(void) { int ret=-1; ret = fmt_method1_init(); if (ret<0) return -1; ret = fmt_method2_init(); if (ret<0) return -2; ret = fmt_method3_init(); if (ret<0) return -3; #ifdef USE_CUDA #pragma omp master ret = cuda_fmt_m_init(); if (ret<0) return -4; // atexit( cuda_fmt_m_finalize() ); #endif return 0; }; void (*fmt_m[]) ( const double t, const double coef, double f[] ) = { fmt0_method1, fmt1_method1, fmt2_method1, fmt3_method1, fmt4_method1, fmt5_method1, fmt6_method3, fmt7_method3, fmt8_method3, }; void fmt_mm(const int m, const double t, const double coef, double f[] ) { fmt_m[m](t, coef, f); #if 0 if (m==0) fmt(f, 0, t, coef); else fmt_m[m](t, coef, f); #endif };
tensor_cpu-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2014 by Contributors
 * \file tensor_cpu-inl.h
 * \brief implementation of CPU host code
 * \author Bing Xu, Tianqi Chen
 */
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"

namespace mshadow {
// CPU specializations of the device-management hooks: the CPU backend
// needs no per-device setup, so these are intentionally empty.
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
// CPU streams carry no state; handles are plain heap objects.
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                   bool create_dnn_handle,
                                   int dev_id) {
  return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
// Print a shape in Python-tuple style, e.g. (2,3) or (5,) for 1-D.
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
  os << '(';
  for (int i = 0; i < ndim; ++i) {
    if (i != 0) os << ',';
    os << shape[i];
  }
  // python style tuple
  if (ndim == 1) os << ',';
  os << ')';
  return os;
}

// Raw host-memory allocation helpers, specialized per target device
// (gpu variant uses pinned memory so H2D copies can be asynchronous).
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);

#ifdef __CUDACC__
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  // 3-arg overload from cuda_runtime.h: pinned, portable across contexts.
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif

template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}

// Allocate contiguous (unpadded) host storage for a tensor; the tensor
// must be contiguous because no pitch is recorded.
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;  // mark freed so a second FreeHost is caught above
}

// Allocate device-side (here: CPU) storage.  With pad=true rows are
// aligned via AlignedMallocPitch and stride_ may exceed the last dim.
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}

// Allocate a tensor and fill every element with initv.
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape,
                                            DType initv,
                                            bool pad,
                                            Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}

// CPU->CPU copy; fast path memcpy when both sides are contiguous,
// otherwise row-by-row through the flattened 2-D view.
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
// memcpy over DType is intentional; silence GCC 8's class-memaccess warning
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
#pragma GCC diagnostic pop
}

// Evaluate an expression plan element-wise into dst (2-D flattened view),
// parallelized over rows with OpenMP when not compiling device code.
template<typename Saver, typename R, int dim,
         typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  // temp remove openmp, as default setting throttles CPU
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! -_- they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}

// code to handle SSE optimization
// Generic fallback: expression cannot be packet-vectorized, use MapPlan.
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};

// Vectorizable case: use packet (SSE-style) evaluation when both the
// expression and the destination pass the runtime alignment check.
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};

// Entry point: dst = Saver(dst, exp), with static type/shape checking,
// dispatching to the packet or scalar engine above.
template<typename Saver, typename R, int dim,
         typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
                   const expr::Exp<E, DType, etype> &exp) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
      ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
  Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
  Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
  CHECK(eshape[0] == 0 || eshape == dshape)
      << "Assignment: Shape of Tensors are not consistent with target, "
      << "eshape: " << eshape << " dshape:" << dshape;
  MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
                  Saver, R, dim, DType, E, etype>
      ::Map(dst->ptrself(), exp);
}

// Reduce exp over all but the lowest dimension: dst[x] = Reducer over y
// of exp(y, x), scaled by `scale`.  Parallel over x.
template<typename Saver, typename Reducer,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
                                const expr::Exp<E, DType, etype> &exp,
                                DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self()).FlatTo2D();
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
  CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t x = 0; x < eshape[1]; ++x) {
    DType res = splan.Eval(0, x);
    for (index_t y = 1; y < eshape[0]; ++y) {
      Reducer::Reduce(res, splan.Eval(y, x));
    }
    Saver::template Save<DType>(dplan.REval(0, x), res * scale);
  }
}

// Reduce exp over every dimension except dimkeep.  The expression shape is
// reinterpreted as the equivalent 4-D (before, keep, mid, last) form and
// reduced in two levels so Reducer sees a fresh accumulator per slice.
template<typename Saver, typename Reducer, int dimkeep,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
                                 const expr::Exp<E, DType, etype> &exp,
                                 DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0])
      << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equvalent form
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t c = 0; c < pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}

// 1-D softmax with the usual max-subtraction trick for numerical stability.
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}

// Gradient of softmax + cross-entropy w.r.t. the logits:
// dst = src, except dst[y][label[y]] -= 1.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}

// Label-smoothed variant: the true class gets -1+alpha, every other class
// gets -alpha/(C-1), where C = dst.size(1).
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f + alpha;
      } else {
        dst[y][x] = src[y][x] - smooth_grad;
      }
    }
  }
}

// As SoftmaxGrad above, but rows whose label equals ignore_label get a
// zero gradient (used to mask padding / unlabeled samples).
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}

// Smoothed + ignore_label combination of the two variants above.
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f + alpha;
        } else {
          dst[y][x] = src[y][x] - smooth_grad;
        }
      }
    }
  }
}

// 3-D variants: tensors are (batch y, class x, position n) with per-position
// labels label[y][n].  Parallelized over n.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f;
        } else {
          dst[y][x][n] = src[y][x][n];
        }
      }
    }
  }
}

template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
        } else {
          dst[y][x][n] = src[y][x][n] - smooth_grad;
        }
      }
    }
  }
}

template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f;
          } else {
            dst[y][x][n] = src[y][x][n];
          }
        }
      }
    }
  }
}

template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
          } else {
            dst[y][x][n] = src[y][x][n] - smooth_grad;
          }
        }
      }
    }
  }
}

// Row-wise softmax over a 2-D tensor (delegates to the 1-D version).
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    Softmax(dst[y], energy[y]);
  }
}

// Softmax along dim 1 of a 3-D tensor, independently per (y, n) pair,
// with the same max-subtraction stabilization as the 1-D case.
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
                    const Tensor<cpu, 3, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    for (index_t n = 0; n < dst.size(2); ++n) {
      DType mmax = energy[y][0][n];
      for (index_t x = 1; x < dst.size(1); ++x) {
        if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
      }
      DType sum = DType(0.0f);
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
        sum += dst[y][x][n];
      }
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] /= sum;
      }
    }
  }
}

// Embedding-style scatter-add: dst[index[y]] += src[y].  When clip is
// true, out-of-range indices are clamped to [0, K-1]; otherwise they are
// wrapped modulo K.
template<bool clip, typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  const index_t K = dst.shape_[0];
  const index_t C = dst.shape_[1];
  for (index_t y = 0; y < index.size(0); ++y) {
    index_t j = index[y];
    if (clip) {
      if (j <= 0) j = 0;
      else if (j >= K) j = K - 1;
    } else {
      j %= K;
      if (j < 0) j += K;
    }
    for (index_t i = 0; i < C; ++i) {
      dst[j][i] += src[y][i];
    }
  }
}

// safe accumulation
// Same scatter-add, but accumulates in a (possibly wider) AType buffer
// `temp` and writes back to dst afterwards, to reduce precision loss.
template<bool clip, typename IndexType, typename DType, typename AType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        Tensor<cpu, 2, AType> temp,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  const index_t K = dst.shape_[0];
  const index_t C = dst.shape_[1];
  for (index_t j = 0; j < K; ++j) {
    for (index_t i = 0; i < C; ++i) {
      temp[j][i] = dst[j][i];
    }
  }
  for (index_t y = 0; y < index.size(0); ++y) {
    index_t j = index[y];
    if (clip) {
      if (j <= 0) j = 0;
      else if (j >= K) j = K - 1;
    } else {
      j %= K;
      if (j < 0) j += K;
    }
    for (index_t i = 0; i < C; ++i) {
      temp[j][i] += src[y][i];
    }
  }
  for (index_t j = 0; j < K; ++j) {
    for (index_t i = 0; i < C; ++i) {
      dst[j][i] = temp[j][i];
    }
  }
}

// Scatter-add driven by pre-sorted destination rows; no bounds handling
// here — `sorted` entries are assumed valid row indices of dst.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}

// Row scatter-assign: dst[index[y]] = src[y] (last write wins on
// duplicate indices).
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    for (index_t j = 0; j < src.size(1); j++) {
      dst[index[y]][j] = src[y][j];
    }
  }
}

// Stable in-place sort of (keys, values) pairs by key, via an index
// permutation.  NOTE(review): loop index `i` is int against index_t
// sizes; fine for sizes within int range.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
      << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
      << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  for (int i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}

// Sort values within each segment: two stable sorts give a segment-major,
// value-minor ordering.
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // We can sort each segments using two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}

// blas related
// dst[0] = lhs . rhs via the device BLAS engine.
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar";
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot(
      lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}

// Batched GEMM: dst = alpha * op(lhs) * op(rhs) + beta * dst per batch.
// Operand order and the transpose flags are swapped in the call below to
// express the row-major product through a column-major BLAS.
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha,
                      DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  index_t batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
      : lhs.shape_;
  Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
      : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
      << "BatchGEMM: batchsize must be equal."
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
      << "BatchGEMM: matrix shape mismatch"
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(workspace.size(0) >= 3 * batch_size)
      << "Workspace Size must be bigger than " << 3 * batch_size;
  CHECK_EQ(workspace.CheckContiguous(), true);
  // use column major argument to compatible with most BLAS
  expr::BLASEngine<Device, DType>::batched_gemm
      (dst.stream_,
       transpose_right, transpose_left,
       transpose_right ? rhs.size(1) : rhs.size(2),
       transpose_left ? lhs.size(2) : lhs.size(1),
       transpose_right ? rhs.size(2) : rhs.size(1),
       alpha,
       rhs.dptr_, rhs.stride_,
       lhs.dptr_, lhs.stride_,
       beta,
       dst.dptr_, dst.stride_, batch_size,
       workspace.dptr_);
}
}  // namespace mshadow
#endif  // MSHADOW_TENSOR_CPU_INL_H_
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/

#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels  56
/* Round up to an even byte count (PSD blocks are 2-byte padded). */
#define PSDQuantum(x)  (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
/* PSD per-channel compression methods, as stored in the file. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

/* PSD color modes from the file header. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
/*
  Typedef declarations.
*/

/*
  One per-layer channel record: the PSD channel id ("type": 0..n are color
  planes, -1 is the transparency channel, -2/-3 are user/vector masks) and the
  byte length of that channel's compressed image data.
*/
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;

/*
  Layer-mask record: the mask image, its placement rectangle, the default
  background value outside the mask, and the PSD mask flags byte.
*/
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

/*
  One PSD layer record as read from the layer-info section: channel headers,
  4-byte blend-mode key, the decoded layer image, its mask, opacity,
  placement, flags, Pascal-style name (max 256 bytes + NUL) and the raw
  additional-info block.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /* A PSD/PSB file always begins with the 4-byte signature "8BPS". */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Map an ImageMagick composite operator to the 4-byte PSD blend-mode key.
  The key is stored byte-reversed when the image endian is LSB, so each case
  returns the reversed spelling for LSBEndian and the canonical spelling
  otherwise.  Unknown operators fall back to "norm" (normal).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity 
%.20g", (double) opacity); if (opacity == QuantumRange) return(MagickTrue); if (image->matte != MagickTrue) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity))); else if (opacity > 0) SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/ (MagickRealType) opacity))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; MagickPixelPacket color; ssize_t y; if (image->matte == MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->matte=MagickTrue; GetMagickPixelPacket(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color); status=CompositeImage(complete_mask,OverCompositeOp,mask, mask->page.x-image->page.x,mask->page.y-image->page.y); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) 
image->rows; y++) { PixelPacket *magick_restrict q; PixelPacket *p; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) 
/* Abort the decode (returning pixels written so far) if the compressed
   input is exhausted; otherwise consume one input packet. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Abort the decode if writing `count` more samples would overrun the
   output row buffer; otherwise account for them. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

/*
  Decode one PackBits (RLE) compressed scanline.  Control bytes > 128 mean
  "repeat the next byte 257-n times", 128 is a no-op, and 0..127 mean "copy
  the next n+1 literal bytes".  For depths 1/2/4 each decoded byte is
  expanded into 8/4/2 output samples (1-bit data is inverted: set bits
  become 0, clear bits 255).  Returns the number of output samples written,
  which the caller compares against the expected row size.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  int
    pixel;

  ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;
    if (length > 128)
      {
        /* Run: replicate the next input byte 257-length times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: copy the next length+1 input bytes. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/*
  Release every image, mask image and info blob held by an array of
  number_layers LayerInfo records, then free the array itself.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i<number_layers; i++)
  {
    if (layer_info[i].image != (Image *) NULL)
      layer_info[i].image=DestroyImage(layer_info[i].image);
    if (layer_info[i].mask.image != (Image *) NULL)
      layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
    if (layer_info[i].info != (StringInfo *) NULL)
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}

/*
  Bytes per sample in the file: 2 for palettes with more than 256 entries,
  otherwise 4/2/1 for 32/16/8-bit depths.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if (image->colors > 256)
        return(2);
    }
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}

/*
  Read a section length from the blob: 32-bit for PSD (version 1),
  64-bit for PSB (version 2).
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version == 1)
    return((MagickSizeType) ReadBlobLong(image));
  return((MagickSizeType) ReadBlobLongLong(image));
}

/*
  Bytes per decoded scanline; 1-bit rows are packed eight samples per byte.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  if (image->depth == 1)
    return(((image->columns+7)/8)*GetPSDPacketSize(image));
  else
    return(image->columns*GetPSDPacketSize(image));
}

/*
  Human-readable name of a PSD color mode, for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode: return "Bitmap";
    case GrayscaleMode: return "Grayscale";
    case IndexedMode: return "Indexed";
    case RGBMode: return "RGB";
    case CMYKMode: return "CMYK";
    case MultichannelMode: return "Multichannel";
    case DuotoneMode: return "Duotone";
    case LabMode: return "L*A*B";
    default: return "unknown";
  }
}
/*
  Walk the 8BIM image-resource section.  The whole section is preserved as
  an "8bim" StringInfo profile for round-tripping; resources we understand
  are also decoded: 0x03ed (ResolutionInfo) sets x/y resolution and the
  tiff:XResolution/tiff:YResolution properties, and 0x0421 (version info)
  clears *has_merged_image when its "has real merged data" byte is zero.
  Every record is bounds-checked against the section buffer and records are
  padded to even offsets.
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* The Pascal name is padded so name+length byte occupy an even count. */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 is zero when no valid merged image exists. */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
/*
  Map a 4-character PSD blend-mode key (big-endian spelling, as produced by
  ReadPSDString's byte reversal) back to an ImageMagick composite operator.
  Unknown or NULL keys fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/*
  Read `length` bytes into p; when the image endian is not MSB the buffer is
  reversed in place (XOR-swap) so 4-byte keys compare in canonical order.
  Returns the byte count actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      q=p+length;
      for(--q; p < q; ++p, --q)
      {
        *p = *p ^ *q,
        *q = *p ^ *q,
        *p = *p ^ *q;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel/index arrays according to the PSD
  channel type: for PseudoClass images type 0 is the palette index and other
  types set colormap alpha; for DirectClass images 0/1/2 are R/G/B (gray is
  replicated when there are fewer than 3 channels or for mask type -2),
  -1 is alpha, 3 is either the CMYK black plane or alpha, and 4 is alpha for
  CMYK+alpha images.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,PixelPacket *q,
  IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;

      IndexPacket
        index;

      index=(IndexPacket) pixel;
      if (packet_size == 1)
        index=(IndexPacket) ScaleQuantumToChar(index);
      index=ConstrainColormapIndex(image,(ssize_t) index);
      if (type == 0)
        SetPixelIndex(indexes+x,index);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(indexes+x);
      if (type != 0)
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      if ((channels < 3) || (type == -2))
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}

/*
  Convert one decoded scanline of raw samples (8/16/32-bit big-endian) into
  Quantum values and store them via SetPSDPixel.  1-bit rows arrive packed;
  each byte is expanded into up to 8 pixels, with set bits mapping to black.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  IndexPacket
    *indexes;

  PixelPacket
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else if (packet_size == 2)
      {
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    else
      {
        MagickFloatType
          nibble;

        p=PushFloatPixel(MSBEndian,p,&nibble);
        pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* x now points one past the last bit; step back for the loop's x++. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one row-sized blob read per scanline, each
  handed to ReadPSDChannelPixels.  Fails if the blob runs short.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-scanline byte-count table that precedes RLE channel data:
  16-bit counts for PSD (version 1), 32-bit for PSB.  Returns NULL on
  allocation failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel.  The caller supplies the
  per-row compressed sizes (from ReadPSDRLESizes); each row is read into a
  scratch buffer, decoded with DecodePSDPixels, then stored via
  ReadPSDChannelPixels.  Rows claiming more than row_size+2048 compressed
  bytes are rejected as corrupt.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed buffer to the largest row in the table. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images an out-of-range depth (123456) is passed on purpose
      so DecodePSDPixels takes its default byte-copy path; the bit-to-pixel
      expansion happens later in ReadPSDChannelPixels.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a zlib-compressed channel (ZipWithoutPrediction or ZipWithPrediction).
  The whole compressed payload (compact_size bytes) is read, inflated into a
  rows*row_size buffer, and — for the prediction variant — the per-row
  horizontal delta encoding is undone (16-bit samples are carried with their
  low/high byte pair; 8-bit samples are a simple running sum).  Finally each
  row is stored via ReadPSDChannelPixels.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* A claimed compressed size beyond the file is corrupt. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /* Undo the horizontal delta predictor, one row at a time. */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the compression tag.  Mask
  channels (type < -1) are decoded into a separate mask image attached to
  layer_info; unsupported or disabled masks are skipped by seeking past
  their data.  On return the blob is positioned exactly after the channel
  data (seek from the saved offset) regardless of how much was consumed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel payload (size includes the compression word). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Reposition exactly past this channel's data, whatever was consumed. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Decode one layer: set up colorspace and blend mode on the layer image,
  record psd:layer.* artifacts, read every channel, apply the layer opacity
  and (unless disabled by its flags) the opacity mask, and negate CMYK data.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else
    if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
        (psd_info->mode == GrayscaleMode))
      (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression tag. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /*
        Do not composite the mask when it is disabled.
      */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  Validate a layer's channel list against the file header: every color
  plane required by the color mode (R, or R/G/B, or C/M/Y/K) must be
  present, indexed layers must lead with channel type 0, and an extra
  channel is only acceptable as alpha when the layer carries at least
  min_channels+1 channels.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel. So we enable it when we think we should.
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->matte=MagickTrue; } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->matte=MagickTrue; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; ssize_t i; ssize_t count, j, number_layers; size=GetLayerInfoSize(psd_info,image); status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " 
offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " 
blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) 
            RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}

/*
  ReadPSDLayers() is the exported entry point for decoding the layer-info
  section; it enforces the coder read policy before delegating to
  ReadPSDLayersInternal().
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}

/*
  ReadPSDMergedImage() decodes the precombined (flattened) image that follows
  the layer section.  Only Raw and RLE compression are supported here; RLE
  row sizes are read up front, then each channel is decoded in turn.  For a
  2-channel (gray+alpha) file, channel index 1 is remapped to type -1 (alpha).
  Returns MagickFalse when the compression is unsupported or decoding fails.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One packed-row byte count per row per channel precedes the data. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* Gray+alpha: the second channel is the alpha channel. */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

/*
  ReadPSDImage() reads an Adobe Photoshop (PSD/PSB) file and returns the
  decoded image list, or NULL with an exception on failure.  It validates the
  file header, then processes the colormap, image resources, layer section
  and finally the precombined merged image.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    image_list_length;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* Version 1 is PSD, version 2 is PSB (large document). */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* PSD (version 1) caps dimensions at 30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  /* min_channels is the channel count the color mode requires. */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize));
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,
                "MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "  Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace);
      }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all reds, then greens, then blues.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,
          "InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May set has_merged_image from the resource data. */
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  /* Remember where the layer section starts so it can be re-read later. */
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  reading the precombined layer");
  image_list_length=GetImageListLength(image);
  if (has_merged_image != MagickFalse || image_list_length == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* No usable merged image and no layers read: re-read the layer section. */
  if ((has_merged_image == MagickFalse) && (image_list_length == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      image_list_length=GetImageListLength(image);
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (image_list_length == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,
            "InsufficientImageDataInFile");
        }
      /* Synthesize the merged image by flattening the decoded layers. */
      image->background_color.opacity=TransparentOpacity;
      (void) SetImageBackgroundColor(image);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      if (merged == (Image *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every image in the list. */
      next=image;
      while (next != (Image *) NULL)
      {
        (void) SetImageProfile(next,GetStringInfoName(profile),profile);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant of the PSD format. */
  entry=SetMagickInfo("PSB");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Large Document Format");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PSD");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Photoshop bitmap");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image
%  format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
*/

/*
  SetPSDOffset() writes a version-dependent row-offset placeholder: 2 bytes
  for PSD (version 1), 4 bytes for PSB.  Returns the bytes written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  WritePSDOffset() seeks back to a previously reserved offset slot, writes
  the final size there (2 or 4 bytes depending on the version) and restores
  the current blob position.  Returns the bytes written.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  SetPSDSize() writes a version-dependent section-size placeholder: 4 bytes
  for PSD (version 1), 8 bytes for PSB.  Returns the bytes written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

/*
  WritePSDSize() seeks back to a previously reserved size slot, writes the
  final size there (4 or 8 bytes depending on the version) and restores the
  current blob position.  Returns the bytes written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
    result=WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  PSDPackbitsEncodeImage() compresses `length' bytes from `pixels' into
  `compact_pixels' using PackBits run-length encoding (runs of >= 3 equal
  bytes become a repeat packet, everything else a literal packet, terminated
  by the 0x80 EOD marker).  Returns the compressed byte count, or 0 when the
  scratch buffer cannot be allocated.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit a 1-byte literal packet. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: emit a 2-byte literal packet. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Final run of three equal bytes: emit a repeat packet. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              /* PackBits repeat packets hold at most 128 bytes. */
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  WriteCompressionStart() writes the 2-byte compression tag for the next
  channel data and, for RLE, reserves one row-size placeholder per row per
  channel (filled in later via WritePSDOffset).  Returns the bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    i,
    offset,
    y;

  if (next_image->compression == RLECompression)
    {
      offset=WriteBlobMSBShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          offset+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else
    if (next_image->compression == ZipCompression)
      offset=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    offset=WriteBlobMSBShort(image,Raw);
  return((size_t) offset);
}

/*
  WritePSDChannel() encodes one channel of next_image into the output blob
  using the image's compression (Raw, RLE or, with zlib, Zip) and returns the
  bytes written.  When `separate' is set the channel is written as its own
  section with its own compression tag, and its final size is patched into
  size_offset.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const PixelPacket
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,
      &image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    if (monochrome != MagickFalse)
      /* PSD stores 1-bit data with inverted polarity. */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's packed size into the reserved offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else
      if (next_image->compression == ZipCompression)
        {
          stream.avail_in=(uInt) length;
          stream.next_in=(Bytef *) pixels;
          if (y == (ssize_t) next_image->rows-1)
            flush=Z_FINISH;
          do
          {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
          } while (stream.avail_out == 0);
        }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  AcquireCompactPixels() allocates a scratch buffer for PackBits output,
  sized generously relative to the row width so the worst-case expansion
  fits.  Returns NULL (with an exception recorded) on allocation failure.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
    }
  return(compact_pixels);
}

/*
  WritePSDChannels() writes all channels of next_image (index or
  gray / RGB / CMYK, plus alpha when present).  With `separate' unset the
  channels share one compression header and a common RLE row-size table;
  with `separate' set each channel carries its own header and its size is
  patched back via WritePSDSize.  Returns the total bytes written.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    length,
    offset_length;

  ssize_t
    count;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if ((next_image->storage_class != PseudoClass) ||
          (IsGrayImage(next_image,&next_image->exception) != MagickFalse))
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      /* Size of one row's offset entry: 2 bytes for PSD, 4 for PSB. */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
        2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
    {
      /* Colormapped image: write the single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->x_resolution+0.5; y_resolution=2.54*65536.0*image->y_resolution+0.5; units=2; } else { x_resolution=65536.0*image->x_resolution+0.5; y_resolution=65536.0*image->y_resolution+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { ssize_t count; count=WriteBlobMSBSignedShort(image,channel); count+=SetPSDSize(psd_info,image,0); return((size_t) count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) 
!= 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", 
"thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image) { char layer_name[MaxTextExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo psd_info; ssize_t i; size_t layer_count, 
layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,&image->exception) != MagickFalse)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. 
*/ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); (void) SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for 
(next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(unsigned char) (next_image->compose == NoCompositeOp ? 
1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(unsigned char) ( mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
DRB108-atomic-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/

#include <stdio.h>

/*
 * Test if atomic can be recognized properly. No data races.
 * */
int main (void)
{
  int a=0;

  /* Each thread increments the shared counter; the atomic construct
     makes the read-modify-write indivisible, so this benchmark is
     intentionally race-free (a tool reporting a race here is wrong). */
#pragma omp parallel
  {
#pragma omp atomic
    a+=1;
  }

  /* Prints the number of threads that executed the parallel region. */
  printf ("a=%d\n",a);
  return 0;
}
spinless_fermion_basis_core.h
#ifndef _SPINLESS_FERMION_BASIS_OP_H
#define _SPINLESS_FERMION_BASIS_OP_H

#include <complex>
#include "hcb_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "openmp.h"
#include "misc.h"

namespace basis_general {

/*
template<class I>
void mergeSort(I nums[],I work[],const I left,const I mid,const I right, bool &f_count){
	I leftLength = mid - left + 1;
	I rightLength = right - mid;
	I * lAr = work;
	I * rAr = work+leftLength;
	for (I i = 0; i < leftLength; i++) {
		lAr[i] = nums[left + i];
	}
	for (I i = 0; i < rightLength; i++) {
		rAr[i] = nums[mid + 1 + i];
	}
	I i = 0, j = 0, k = left;
	while (i < leftLength && j < rightLength) {
		if (lAr[i] >= rAr[j]) {
			nums[k] = lAr[i];
			if(j&1){f_count ^= 1;}
			i++;
		}
		else {
			nums[k] = rAr[j];
			j++;
		}
		k++;
	}
	//remaining isertions
	if((j&1) && ((leftLength-i)&1)){f_count ^= 1;}
	if (i >= leftLength) {
		//copy remaining elements from right
		for (; j < rightLength; j++, k++) {
			nums[k] = rAr[j];
		}
	}
	else {
		//copy remaining elements from left
		for (; i < leftLength; i++, k++) {
			nums[k] = lAr[i];
		}
	}
}

//I sort the array using merge sort technique.
template<class I>
void getf_count(I nums[],I work[], I left, I right, bool &f_count){
	if (left < right) {
		I mid = (I)((int)left + (int)right) / 2;
		getf_count(nums, work, left, mid, f_count);
		getf_count(nums, work, (I)(mid + 1), right, f_count);
		mergeSort(nums, work, left, mid, right, f_count);
	}
}
*/

// Apply the site permutation 'map' (length N; negative entries encode a
// particle-hole transformed site -(j+1)) to the occupation bitstring 's'.
// The fermionic sign from reordering the occupied-site list is accumulated
// into 'sign' via the parity of countSwaps().  Returns the mapped state.
template<class I,class P>
I inline spinless_fermion_map_bits(I s,const int map[],const int N,P &sign)
{
	I ss = 0;
	int np = 0;
	int pos_list[bit_info<I>::bits];

	for(int i=N-1;i>=0;--i){
		const int j = map[i];
		const I n = (s&1);          // occupation of site i (lowest bit)
		const bool neg = j<0;       // negative map entry: PH-transformed site
		if(n){
			pos_list[np++] = ( neg ? -(j+1) : j);
			// f_count ^= (neg&&(i&1)); do not change sign based on PH transformation
		}
		ss ^= ( neg ? (n^1)<<(N+j) : n<<(N-j-1) );
		s >>= 1;
	}
	// getf_count(pos_list,work,0,np-1,f_count);
	// Parity of the permutation that sorts the mapped positions gives the
	// fermionic sign of the reordering.
	int Nswap = countSwaps<I>(pos_list,np);
	if(Nswap&1){sign *= -1;}
	return ss;
}

// Accumulate into 'sign' the fermionic sign associated with mapping state
// 's' (with bit-inversion mask 'inv').  NOTE(review): 'inv' is currently
// unused in this body, and getf_count here takes 4 arguments — it must be
// a different overload than the commented-out 5-argument version above;
// both are presumably provided by an included header.  Confirm.
template<class I,class P>
void get_map_sign(I s,I inv,P &sign)
{
	typename bit_info<I>::bit_index_type pos_list[bit_info<I>::bits];
	bool f_count = 0;
	I ne = bit_count(bit_info<I>::eob&s,bit_info<I>::bits-1); // count number of partices on odd sites
	f_count ^= (ne&1);
	typename bit_info<I>::bit_index_type n = bit_pos(s,pos_list) - 1; // get bit positions
	getf_count(pos_list,(typename bit_info<I>::bit_index_type)0,n,f_count);
	if(f_count){sign *= -1;}
}

// Spinless-fermion basis: reuses the hard-core-boson machinery of
// hcb_basis_core but tracks the fermionic sign when permuting sites
// (map_state) and when applying creation/annihilation operators (op).
template<class I,class P=signed char>
class spinless_fermion_basis_core : public hcb_basis_core<I,P>
{
	public:
		spinless_fermion_basis_core(const int _N,const bool _pre_check=false) : \
		hcb_basis_core<I>::hcb_basis_core(_N,true,_pre_check) { }

		spinless_fermion_basis_core(const int _N,const int _nt,const int _maps[], \
								   const int _pers[], const int _qs[],const bool _pre_check=false) : \
		hcb_basis_core<I>::hcb_basis_core(_N,_nt,_maps,_pers,_qs,true,_pre_check) {}

		~spinless_fermion_basis_core(){}

		// I map_state(I s,int n_map,int &sign){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return s;
		// 	}
		// 	get_map_sign<I>(s,hcb_basis_core<I>::invs[n_map],sign);
		// 	return benes_bwd(&hcb_basis_core<I>::benes_maps[n_map],s^hcb_basis_core<I>::invs[n_map]);;
		// }

		// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return;
		// 	}
		// 	const tr_benes<I> * benes_map = &hcb_basis_core<I>::benes_maps[n_map];
		// 	const I inv = hcb_basis_core<I>::invs[n_map];
		// 	#pragma omp for schedule(static,1)
		// 	for(npy_intp i=0;i<M;i++){
		// 		int temp_sign = sign[i];
		// 		get_map_sign<I>(s[i],inv,temp_sign);
		// 		s[i] = benes_bwd(benes_map,s[i]^inv);
		// 		sign[i] = temp_sign;
		// 	}
		// }

		// Map a single state through symmetry map n_map, updating 'sign'.
		I map_state(I s,int n_map,P &sign){
			if(general_basis_core<I,P>::nt<=0){
				return s;
			}
			const int n = general_basis_core<I,P>::N;
			return spinless_fermion_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n,sign);
		}

		// Map M states in place; intended to run inside an OpenMP parallel
		// region (work is shared via '#pragma omp for').
		void map_state(I s[],npy_intp M,int n_map,P sign[]){
			if(general_basis_core<I,P>::nt<=0){
				return;
			}
			const int n = general_basis_core<I,P>::N;
			const int * map = &general_basis_core<I,P>::maps[n_map*n];
			#pragma omp for schedule(static)
			for(npy_intp i=0;i<M;i++){
				s[i] = spinless_fermion_map_bits(s[i],map,n,sign[i]);
			}
		}

		// Apply the operator string opstr (acting on sites indx[]) to state
		// r, accumulating the matrix element in m.  The Jordan-Wigner sign
		// is the parity of occupied sites left of the target bit.  Returns
		// 0 on success, -1 on an unrecognized operator character.
		int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
			const I s = r;
			const I one = 1;

			for(int j=n_op-1;j>-1;j--){
				const int ind = general_basis_core<I,P>::N-indx[j]-1;
				I f_count = bit_count(r,ind);              // occupied sites below 'ind'
				double sign = ((f_count&1)?-1:1);          // Jordan-Wigner string sign
				const I b = (one << ind);
				const bool a = (bool)((r >> ind)&one);
				const char op = opstr[j];
				switch(op){
					case 'z':
						m *= (a?0.5:-0.5);
						break;
					case 'x':
						m *= sign;
						r ^= b;
						break;
					case 'y':
						// corresponds to -\sigma^y
						m *= (a?std::complex<double>(0,-1.0*sign):std::complex<double>(0,+1.0*sign));
						r ^= b;
						break;
					case 'n':
						m *= (a?1:0);
						break;
					case '+':
						m *= (a?0:sign);
						r ^= b;
						break;
					case '-':
						m *= (a?sign:0);
						r ^= b;
						break;
					case 'I':
						break;
					default:
						return -1;
				}

				// Short-circuit once the matrix element vanishes.
				if(m.real()==0 && m.imag()==0){
					r = s;
					break;
				}
			}

			return 0;
		}

};

}

#endif
prettyprint.c
/* $Id$ Copyright 1989-2014 MINES ParisTech This file is part of PIPS. PIPS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or any later version. PIPS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with PIPS. If not, see <http://www.gnu.org/licenses/>. */ #ifdef HAVE_CONFIG_H #include "pips_config.h" #endif #ifndef lint char lib_ri_util_prettyprint_c_rcsid[] = "$Id$"; #endif /* lint */ /* * Prettyprint all kinds of ri related data structures * * Modifications: * - In order to remove the extra parentheses, I made the several changes: * (1) At the intrinsic_handler, the third term is added to indicate the * precendence, and accordingly words_intrinsic_precedence(obj) is built * to get the precedence of the call "obj". * (2) words_subexpression is created to distinguish the * words_expression. It has two arguments, expression and * precedence. where precedence is newly added. In case of failure * of words_subexpression , that is, when * syntax_call_p is false, we use words_expression instead. * (3) When words_call is firstly called , we give it the lowest precedence, * that is 0. * Lei ZHOU Nov. 4, 1991 * * - Addition of CMF and CRAFT prettyprints. Only text_loop() has been * modified. * Alexis Platonoff, Nov. 18, 1994 * - Modifications of sentence_area to deal with the fact that * " only one appearance of a symbolic name as an array name in an * array declarator in a program unit is permitted." * (Fortran standard, number 8.1, line 40) * array declarators now only appear with the type declaration, not with the * area. - BC - october 196. 
* * - Modification of text_entity_declaration to ensure that the OUTPUT of PIPS * can also be used as INPUT; in particular, variable declarations must * appear * before common declarations. BC. * * - neither are DATA statements for non integers (FI/FC) * * - Also, EQUIVALENCE statements are not generated for the moment. BC. * Thay are now??? FC? * * - variable pdl added in most signature to handle derived type * declarations in C; it is the parser declaration list; if a derived * type must be prettyprinted, it must be prettyprinted with all * information if in pdl, and else it must be prettyprinted with no * information. For instance, "struct m {int l; int m}" is the * definition of m. Other references to the type must be * prettyprinted "struct m". The PIPS internal representation does * record derived type declarations. The parser declaration list is * used to desambiguate between the two cases. The problem occurs * in both declarations.c and prettyprint.c because types can * appear in expressions thanks to the sizeof and cast operators. * * Data structures used: * * text: to produce output with multiple lines (a.k.a. "sentence") * and proper indenting; this is a Newgen managed data structure * * words: a list of strings to produce output without any specific * formatting, but text's sentences can be built with words. 
 *
 * Call graph structure (a slice of it, for C prettyprint):
 *
 * text_module
 *   text_named_module
 *     text_statement
 *       text_statement_enclosed: to manage braces
 *         text_instruction: to print a command
 *         c_text_related_entities: to print the declarations
 *                                  all variables declared share some type
 *           c_text_entities: to declare a list of variables
 *             c_text_entity: to declare a variable; may call
 *                            recursively c_text_related_entities to
 *                            print out, for instance, a set of members
 *               words_variable_or_function(): words level
 *                 c_words_simplified_entity()
 *                   generic_c_words_simplified_entity()
 */

// To have asprintf:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>

#include "linear.h"

#include "genC.h"
#include "text.h"
#include "text-util.h"
#include "ri.h"
#include "ri-util.h"
#include "effects.h"
#include "misc.h"
#include "properties.h"

/* operator precedences are in the [0,100] range */
#define MAXIMAL_PRECEDENCE 100
#define MINIMAL_ARITHMETIC_PRECEDENCE 19

/* Define the markers used in the raw unstructured output when the
   PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH property is true: */
#define PRETTYPRINT_UNSTRUCTURED_BEGIN_MARKER "\200Unstructured"
#define PRETTYPRINT_UNSTRUCTURED_END_MARKER "\201Unstructured End"
#define PRETTYPRINT_UNSTRUCTURED_ITEM_MARKER "\202Unstructured Item"
#define PRETTYPRINT_UNSTRUCTURED_SUCC_MARKER "\203Unstructured Successor ->"
#define PRETTYPRINT_UNREACHABLE_EXIT_MARKER "\204Unstructured Unreachable"

/*===================== Language management ===========*/

/* The prettyprint language.
   Lazily initialized to Fortran by every accessor below; never reset to
   language_undefined once created. */
static language prettyprint_language = language_undefined;

/**
 * @brief please avoid using this function directly, use predicate instead
 * (see below)
 * @return the prettyprint language as a newgen language object
 */
language get_prettyprint_language () {
  // Lazy initialization: default to Fortran when nothing was set yet
  if (prettyprint_language == language_undefined)
    prettyprint_language = make_language_fortran ();
  return prettyprint_language;
}

/**
 * @return the prettyprint language as a language_utype
 **/
enum language_utype get_prettyprint_language_tag () {
  return language_tag (get_prettyprint_language ());
}

/**
 * @return true if the language is f77
 **/
bool prettyprint_language_is_fortran_p () {
  return (get_prettyprint_language_tag () == is_language_fortran);
}

/**
 * @return true if the language is f95
 **/
bool prettyprint_language_is_fortran95_p () {
  return (get_prettyprint_language_tag () == is_language_fortran95);
}

/**
 * @return true if the language is C
 **/
bool prettyprint_language_is_c_p () {
  return (get_prettyprint_language_tag () == is_language_c);
}

/**
 * @brief set the prettyprint language according to the property
 * PRETTYPRINT_LANGUAGE
 * @description If the property PRETTYPRINT_LANGUAGE is set to the special
 * value "native" then the language passed in arg is used, usually it's the
 * module native language. The user can set "F77", "F95", or "C" to force the
 * prettyprint of a language.
 * @param native language tag to use when the property says "native"
 */
void set_prettyprint_language_from_property( enum language_utype native ) {
  if (prettyprint_language == language_undefined) {
    prettyprint_language = make_language_fortran ();
  }
  const char* lang = get_string_property ("PRETTYPRINT_LANGUAGE");
  if (strcmp (lang, "F77") == 0) {
    language_tag (prettyprint_language) = is_language_fortran;
  } else if (strcmp (lang, "C") == 0) {
    language_tag (prettyprint_language) = is_language_c;
  } else if (strcmp (lang, "F95") == 0) {
    language_tag (prettyprint_language) = is_language_fortran95;
  } else if (strcmp (lang, "native") == 0) {
    // Keep the language the module was originally written in
    language_tag (prettyprint_language) = native;
  } else {
    // Any other property value is a configuration error
    pips_internal_error("bad property value for language");
  }
}

/** @brief set the prettyprint language from a newgen language object
    @param lang, the language to be used to set the prettyprint_language
    variable, content is copied so caller may free if it was malloced
 **/
void set_prettyprint_language (language lang) {
  if (prettyprint_language == language_undefined)
    prettyprint_language = make_language_fortran ();
  // Structure copy: the caller keeps ownership of "lang"
  *prettyprint_language = *lang;
}

/** @brief set the prettyprint language from a language_utype argument
    @param lang, the language to be used to set the prettyprint_language
    variable
 **/
void set_prettyprint_language_tag (enum language_utype lang) {
  if (prettyprint_language == language_undefined)
    prettyprint_language = make_language_fortran ();
  language_tag (prettyprint_language) = lang;
}

/* @brief Start a single line comment
 * @return a string containing the begin of a comment line, language dependent
 */
string get_comment_sentinel() {
  switch(get_prettyprint_language_tag()) {
    case is_language_c:
      return "//";
    case is_language_fortran:
      return "C";
    case is_language_fortran95:
      return "!";
    default:
      pips_internal_error("language unknown not handled");
      return NULL ;
  }
}

/* @brief Start a single line comment with continuation (blank spaces)
 * @return a string containing the begin of a comment line, language dependent
 */
string get_comment_continuation() {
  switch(get_prettyprint_language_tag()) {
    case is_language_c:
      return "// ";
    case is_language_fortran:
      return "C ";
    case is_language_fortran95:
      return "! ";
    default:
      pips_internal_error("language unknown not handled");
      return NULL ;
  }
}

/* @return the number of columns to indent by, per nesting level.
   Fortran 77 fixed-form output is not indented. */
unsigned int get_prettyprint_indentation() {
  if(prettyprint_language_is_fortran_p()) {
    return 0;
  } else {
    return INDENTATION;
  }
}

/* Forward declarations */
static list words_cast(cast obj, int precedence, list pdl);
static list words_sizeofexpression(sizeofexpression obj, bool in_type_declaration, list pdl);
static list words_subscript(subscript s, list pdl);
static list words_application(application a, list pdl);
static text text_forloop(entity module,const char* label,int margin,forloop obj,int n, list pdl);

/* This variable is used to disable the precedence system and hence to
   prettyprint all parentheses, which let the prettyprint reflect the AST. */
static bool precedence_p = true;

/* This variable is used to print braces around all blocks including blocks
   with only one statement.
 */
static bool prettyprint_all_c_braces_p = false;

/* This variable is used to gracefully print braces around if / else blocks
   to avoid gcc warnings */
static bool prettyprint_gcc_c_braces_p = false;

/******************************************************************* STYLES */

/* True if the PRETTYPRINT_PARALLEL property is equal to "s" */
static bool pp_style_p(string s) {
  return same_string_p(get_string_property(PRETTYPRINT_PARALLEL), s);
}

#define pp_hpf_style_p() pp_style_p("hpf")
#define pp_f90_style_p() pp_style_p("f90")
#define pp_craft_style_p() pp_style_p("craft")
#define pp_cray_style_p() pp_style_p("cray")
#define pp_cmf_style_p() pp_style_p("cmf")
#define pp_doall_style_p() pp_style_p("doall")
#define pp_do_style_p() pp_style_p("do")
#define pp_omp_style_p() pp_style_p("omp")

/********************************************************************* MISC */

/* Default statement hook: contributes no text at all.  Used as the
   "unset" value for text_statement_hook below. */
text empty_text(entity __attribute__ ((unused)) e,
                int __attribute__ ((unused)) m,
                statement __attribute__ ((unused)) s) {
  return make_text(NIL);
}

/* Hook called for each statement by the prettyprinter; defaults to the
   do-nothing empty_text. */
static text (*text_statement_hook)(entity, int, statement) = empty_text;

/**
 * @brief checks that the prettyprint hook was actually reset...
 */
void init_prettyprint(text(*hook)(entity, int, statement)) {
  // A hook left over from a previous client is a programming error
  pips_assert("prettyprint hook not set", text_statement_hook==empty_text);
  text_statement_hook = hook;
}

/**
 * @brief because some prettyprint functions may be used for debug, so
 * the last hook set by somebody may have stayed there although
 * being non sense...
 */
void close_prettyprint() {
  text_statement_hook = empty_text;
}

/* Get the prettyprint format of a C label

   @param label a string to render

   @return the printf-format string
 */
string get_C_label_printf_format(const char* label) {
  /* If the label begin with a digit, prefix it with a 'l' to be C
     compatible.

     Hmmm, this does not verify that there is no such label in the
     program already... :-( Should be solved quite earlier anyway... */
  return isdigit(label[0]) ? "l%s:" : "%s:";
}

/**
 * @brief True is statement "s" can be printed out without enclosing
 * braces when it is the true branch of a test. This is a special case
 * because of dangling else clauses.
 */
/* bool one_liner_true_branch_p(statement s) */
/* { */
/* bool one_p = false; */
/* if(!statement_test_p(s)) */
/* one_p = one_liner_p(s); */
/* else { */
/* test t = instruction_test(statement_instruction(s)); */
/* statement f = test_false(t); */
/* if(!(empty_statement_p(f) || nop_statement_p(f))) */
/* one_p = true; // No need to worry, the else clause exists */
/* else { */
/* // Make sure there is no internal dangling else... */
/* one_p = one_liner_test_p(t); */
/* } */
/* } */
/* return one_p; */
/* } */

/**
 * @brief True is test "t" contains a non-empty final "else" clause.
 */
/* bool one_liner_test_p(test t) */
/* { */
/* bool one_liner_p = false; */
/* /\* We must make sure that the final else clause is not empty *\/ */
/* statement f = test_false(t); */
/* if(empty_statement_p(f) || nop_statement_p(f)) */
/* one_liner_p = false; */
/* else if(statement_test_p(f)) { */
/* /\* Go down recursively for "else if" constructs. *\/ */
/* instruction i = statement_instruction(f); */
/* test ft = instruction_test(i); */
/* one_liner_p = one_liner_test_p(ft); */
/* } */
/* else */
/* one_liner_p = true; */
/* return one_liner_p; */
/* } */

/**
 * @brief Can this statement be printed on one line, without enclosing
 * braces, if it is embedded in a loop?
 *
 * Another test must be used if Statement "s" is embedded in a test as
 * a true branch.
*/ bool one_liner_p(statement s) { instruction i = statement_instruction(s); bool yes = (instruction_test_p(i) || instruction_loop_p(i) || instruction_whileloop_p(i) || instruction_call_p(i) || instruction_expression_p(i) || instruction_forloop_p(i) || instruction_goto_p(i) || return_instruction_p(i)); yes = yes && ENDP(statement_declarations(s)); if(!yes && instruction_sequence_p(i)) { list sl = sequence_statements(instruction_sequence(i)); int sc = gen_length(sl); if(sc==1) { /* There may be many lines hidden behind another block construct when code is generated in a non canonical way as for {{x=1;y=2;}} */ instruction ii = statement_instruction(STATEMENT(CAR(sl))); if(instruction_sequence_p(ii)) { /* OK, we could check deeper, but this is only useful for redundant internal representations. Let's forget about niceties such as skipping useless braces. */ yes = false; } else yes = ENDP(statement_declarations(s)); } else yes = (sc < 1) && ENDP(statement_declarations(s)); } return yes; } bool gcc_if_block_braces_required_p(test obj) { statement tb = effective_test_true(obj); if(one_liner_p(tb)) { if (statement_test_p(tb)) { test nested_test = statement_test(tb); statement fb = test_false(nested_test); if (!empty_statement_p(fb)) return prettyprint_gcc_c_braces_p; } } return false; } /***************************************************local variables handling */ static text local_var; static bool local_flg = false; /** * @brief This function either appends the declaration to the text given as a * parameter or return a new text with the declaration */ static text insert_locals (text r) { if (local_flg == true) { if ((r != text_undefined) && (r != NULL)){ MERGE_TEXTS (r, local_var); } else { r = local_var; } local_flg = false; } return r; } /** * @brief This function returns true if BLOCK boundary markers are required. * The function also creates the maker when needed. 
*/ static bool mark_block(unformatted *t_beg, unformatted *t_end, int n, int margin) { bool result = false; if(!get_bool_property("PRETTYPRINT_FOR_FORESYS") && (get_bool_property("PRETTYPRINT_ALL_EFFECTS") || get_bool_property("PRETTYPRINT_BLOCKS"))) result = true; if(result == true) { list pbeg = NIL; list pend = NIL; // Here we need to generate block markers for later use: switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: // Fortran case: comments at the begin of the line pbeg = CHAIN_SWORD (NIL, "BEGIN BLOCK"); pend = CHAIN_SWORD (NIL, "END BLOCK"); *t_beg = make_unformatted(strdup(get_comment_sentinel()), n, margin, pbeg); *t_end = make_unformatted(strdup(get_comment_sentinel()), n, margin, pend); break; case is_language_c: // C case: comments alligned with blocks: pbeg = CHAIN_SWORD(NIL, strdup(get_comment_sentinel())); pend = CHAIN_SWORD(NIL, strdup(get_comment_sentinel())); pbeg = CHAIN_SWORD (pbeg, " BEGIN BLOCK"); pend = CHAIN_SWORD (pend, " END BLOCK"); *t_beg = make_unformatted(NULL, n, margin, pbeg); *t_end = make_unformatted(NULL, n, margin, pend); break; default: pips_internal_error("Language unknown !"); break; } } return result; } /********************************************************************* WORDS */ static int words_intrinsic_precedence(call); static int intrinsic_precedence(const char*); /** * @brief exported for craft */ list words_loop_range(range obj, list pdl) { list pc; call c = syntax_call(expression_syntax(range_increment(obj))); pc = words_subexpression(range_lower(obj), 0, true, pdl); pc = CHAIN_SWORD(pc,", "); pc = gen_nconc(pc, words_subexpression(range_upper(obj), 0, true, pdl)); if (/* expression_constant_p(range_increment(obj)) && */ strcmp( entity_local_name(call_function(c)), "1") == 0 ) return(pc); pc = CHAIN_SWORD(pc,", "); pc = gen_nconc(pc, words_expression(range_increment(obj), pdl)); return(pc); } /** * @brief Output a Fortan-like do-loop range as a C-like for-loop index 
part.
 * @description Assume that the increment is an integer so we can generate the
 * good condition. Since the do-loops are recognized in C program part only
 * with this assumptions, it is a good assumption.
 */
list C_loop_range(range obj, entity i, list pdl) {
  list pc;
  /* call c = syntax_call(expression_syntax(range_increment(obj))); */

  /* Complete the initialization assignment */
  pc = words_subexpression(range_lower(obj), 0, true, pdl);
  pc = CHAIN_SWORD(pc,"; ");

  /* Check the final bound */
  pc = CHAIN_SWORD(pc, entity_user_name(i));

  /* Increasing or decreasing index? */
  expression inc = range_increment(obj);

  /* Assume the increment has an integer value with a known sign

     If The increment is negative, that means the index is tested against
     a lower bound

     Else we assume to test against an upper bound */
  expression ru = range_upper(obj);

  /* check if we have something of the form exp -1 as range_upper:
     ru_minus_one is actually ru+1, so that "i <= exp-1" can be printed
     as the strict "i < exp" after make_op_exp() simplification */
  expression ru_minus_one = make_op_exp(PLUS_OPERATOR_NAME,
                                        copy_expression(ru),
                                        int_to_expression(1) );

  /* Additionally, we want to pretty print a strict comparison if certain
     conditions are met.

     This could be the default choice , but the impact on the validation
     would be huge */
  set re = get_referenced_entities(ru);
  bool references_unsigned_entity_p = false;
  SET_FOREACH(entity,e,re) {
    references_unsigned_entity_p |= unsigned_type_p(ultimate_type(entity_type(e)));
  }
  set_free(re);

  if( references_unsigned_entity_p ) {
    /* Unsigned bound: prefer strict comparisons against ru+1 */
    if(positive_expression_p(inc))
      pc = CHAIN_SWORD(pc, " < ");
    else if(negative_expression_p(inc))
      /* NOTE(review): with a negative increment this prints
         "i > upper+1" (ru_minus_one is ru+1), whereas "i >= upper"
         would be "i > upper-1" — verify against the validation suite */
      pc = CHAIN_SWORD(pc, " > ");
    else {
      //pips_internal_error("loop range cannot be prettyprinted because increment sign"
      // " is unknown\n");
      pips_user_warning("loop increment sign is unknown: assumed positive\n");
      pc = CHAIN_SWORD(pc, " < ");
    }
    pc = gen_nconc(pc, words_subexpression(ru_minus_one, 0, true, pdl));
  }
  else {
    // FI: when inc is not a constant integer,
    // expression_negative_integer_value_p() always return false
    if(positive_expression_p(inc))
      pc = CHAIN_SWORD(pc, " <= ");
    else if(negative_expression_p(inc))
      pc = CHAIN_SWORD(pc, " >= ");
    else {
      //pips_internal_error("loop range cannot be prettyprinted because increment sign"
      // " is unknown\n");
      pips_user_warning("loop increment sign is unknown: assumed positive\n");
      pc = CHAIN_SWORD(pc, " <= ");
    }
    pc = gen_nconc(pc, words_subexpression(ru, 0, true, pdl));
  }
  free_expression(ru_minus_one);
  pc = CHAIN_SWORD(pc,"; ");

  /* Increment the loop index */
  pc = CHAIN_SWORD(pc, entity_user_name(i));
  pc = CHAIN_SWORD(pc," += ");
  pc = gen_nconc(pc, words_expression(inc, pdl));
  pc = CHAIN_SWORD(pc,")");
  return(pc);
}

/**
 * @return a list of string
 *
 * Prettyprint a range in expression position: Fortran implied-DO array
 * constructor "(/ (I,I=l,u[,s]) /)", or "l:u[:s]" for F95 and C.
 */
list words_range(range obj, list pdl) {
  list pc = NIL;

  /* if undefined I print a star, why not!? */
  if(expression_undefined_p(range_lower(obj))) {
    pc = CONS(STRING, MAKE_SWORD("*"), NIL);
  }
  else {
    switch(get_prettyprint_language_tag()) {
      case is_language_fortran: {
        call c = syntax_call(expression_syntax(range_increment(obj)));
        pc = CHAIN_SWORD(pc,"(/ (I,I=");
        pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
        pc = CHAIN_SWORD(pc,",");
        pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
        /* The increment is only printed when it is not the constant 1 */
        if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
          pc = CHAIN_SWORD(pc,",");
          pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
        }
        pc = CHAIN_SWORD(pc,") /)") ;
        break;
      }
      case is_language_fortran95: {
        // Print the lower bound if != *
        if(!unbounded_expression_p(range_lower(obj))) {
          pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
        }
        // Print the upper bound if != *
        pc = CHAIN_SWORD(pc,":");
        if(!unbounded_expression_p(range_upper(obj))) {
          pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
        }
        // Print the increment if != 1
        call c = syntax_call(expression_syntax(range_increment(obj)));
        if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
          pc = CHAIN_SWORD(pc,":");
          pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
        }
        break;
      }
      case is_language_c:
        /* C does not include ranges, but the PIPS internal representation
           does. For instance, constant ranges can be useful to express
           effects or regions for intrinsics. To be discussed with
           Beatrice: e.g. memcpy(), strncp(). Especially when they are
           called with constant arguments. */
        // FI: we might still want a warning, but the compiler will
        // choke anyway if this is used to prettyprint some C source code
        // pips_internal_error("I don't know how to print a range in C !");
        // FI: copied from Fortran 95, but we may prefer to see the stars
        // Print the lower bound if != *
        if(!unbounded_expression_p(range_lower(obj))) {
          pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
        }
        // Print the upper bound if != *
        pc = CHAIN_SWORD(pc,":");
        if(!unbounded_expression_p(range_upper(obj))) {
          pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
        }
        // Print the increment if != 1
        call c = syntax_call(expression_syntax(range_increment(obj)));
        if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
          pc = CHAIN_SWORD(pc,":");
          pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
        }
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
    }
  }
  return pc;
}

/**
 * @description FI: array constructor R433, p. 37 in Fortran 90 standard, can be
 * used anywhere in arithmetic expressions whereas the triplet notation is
 * restricted to subscript expressions. The triplet notation is used to define
 * array sections (see R619, p. 64).
 *
 * @return a list of string corresponding to the range
 */
list words_subscript_range(range obj, list pdl) {
  list pc = NIL;

  /* if undefined I print a star, why not!? */
  if(expression_undefined_p(range_lower(obj))) {
    pc = CONS(STRING, MAKE_SWORD("*"), NIL);
  }
  else {
    switch(get_prettyprint_language_tag()) {
      case is_language_fortran: {
        call c = syntax_call(expression_syntax(range_increment(obj)));
        pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
        pc = CHAIN_SWORD(pc,":");
        pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
        /* The increment is only printed when it is not the constant 1 */
        if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
          pc = CHAIN_SWORD(pc,":");
          pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
        }
        break;
      }
      case is_language_fortran95: {
        // Print the lower bound if != *
        if(!unbounded_expression_p(range_lower(obj))) {
          pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
        }
        // Print the upper bound if != *
        pc = CHAIN_SWORD(pc,":");
        if(!unbounded_expression_p(range_upper(obj))) {
          pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
        }
        // Print the increment if != 1
        call c = syntax_call(expression_syntax(range_increment(obj)));
        if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
          pc = CHAIN_SWORD(pc,":");
          pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
        }
        break;
      }
      case is_language_c:
        // There is no way to print range in C
        // The notation with ":" has been chosen to simplify prettyprint
        {
          // Print the lower bound if != *
          if(!unbounded_expression_p(range_lower(obj))) {
            pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
          }
          // Print the upper bound if != *
          pc = CHAIN_SWORD(pc,":");
          if(!unbounded_expression_p(range_upper(obj))) {
            pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
          }
          // Print the increment if != 1
          call c = syntax_call(expression_syntax(range_increment(obj)));
          if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
            pc = CHAIN_SWORD(pc,":");
            pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
          }
          break;
        }
      default:
        pips_internal_error("Language unknown !");
        break;
    }
  }
  return pc;
}

/* exported for expression.c
 *
 * Should only be used to prettyprint proper C references.
 */
list words_any_reference(reference obj, list pdl, const char* (*enf)(entity)) {
  list pc = NIL;
  string begin_attachment;
  entity e = reference_variable(obj);

  if(!ENTITY_ALLOCATABLE_BOUND_P(e)) {
    /* We don't want to print these special entity, they are there for
     * internal purpose only
     */

    /* Print the entity first, using the caller-supplied naming function */
    pc = CHAIN_SWORD(pc, (*enf)(e));
    begin_attachment = STRING(CAR(pc));

    /* Let's print the indices now */
    if(reference_indices(obj) != NIL) {
      switch(get_prettyprint_language_tag()) {
        case is_language_fortran95:
        case is_language_fortran: {
          /* Fortran style: comma-separated subscripts in parentheses */
          int count = 0;
          pc = CHAIN_SWORD(pc,"(");
          FOREACH(EXPRESSION, subscript, reference_indices(obj)) {
            syntax ssubscript = expression_syntax(subscript);
            if(count > 0)
              pc = CHAIN_SWORD(pc,",");
            else
              count++;
            if(syntax_range_p(ssubscript)) {
              pc = gen_nconc(pc, words_subscript_range(syntax_range(ssubscript), pdl));
            }
            else {
              pc = gen_nconc(pc, words_subexpression(subscript, 0, true, pdl));
            }
          }
          pc = CHAIN_SWORD(pc,")");
          break;
        }
        case is_language_c: {
          /* C style: one bracket pair per subscript */
          FOREACH(EXPRESSION, subscript, reference_indices(obj)) {
            syntax ssubscript = expression_syntax(subscript);
            pc = CHAIN_SWORD(pc, "[");
            if(syntax_range_p(ssubscript)) {
              pc = gen_nconc(pc, words_subscript_range(syntax_range(ssubscript), pdl));
            }
            else {
              pc = gen_nconc(pc, words_subexpression(subscript, 0, true, pdl));
            }
            pc = CHAIN_SWORD(pc, "]");
          }
          break;
        }
        default:
          pips_internal_error("Language unknown !");
      }
    }
    /* Record the word span of this reference for attachments
       (hypertext back-ends) */
    attach_reference_to_word_list(begin_attachment, STRING(CAR(gen_last(pc))),
                                  obj);
  }
  return(pc);
}

/* Prettyprint a reference with the default user-level entity naming */
list words_reference(reference obj, list pdl) {
  return words_any_reference(obj, pdl, entity_user_name);
}

/* Management of alternate returns */

/* Labels referenced by regenerated alternate returns; list_undefined
   outside a set_alternate_return_set/reset_alternate_return_set pair */
static list set_of_labels_required_for_alternate_returns = list_undefined;

/* Open the collection of alternate-return target labels */
void set_alternate_return_set() {
  ifdebug(1) {
    pips_assert("The target list is undefined",
                list_undefined_p(set_of_labels_required_for_alternate_returns));
  }
  set_of_labels_required_for_alternate_returns = NIL;
}

/* Close and discard the collection of alternate-return target labels */
void reset_alternate_return_set() {
  ifdebug(1) {
    pips_assert("The target list is initialized",
                !list_undefined_p(set_of_labels_required_for_alternate_returns));
  }
  gen_free_list(set_of_labels_required_for_alternate_returns);
  set_of_labels_required_for_alternate_returns = list_undefined;
}

/* Record label "l" as an alternate-return target (at most once) */
void add_target_to_alternate_return_set(entity l) {
  ifdebug(1) {
    pips_assert("The target list is initialized",
                !list_undefined_p(set_of_labels_required_for_alternate_returns));
  }
  set_of_labels_required_for_alternate_returns =
      gen_once(l, set_of_labels_required_for_alternate_returns);
}

/* Emit one labelled CONTINUE per collected alternate-return target.
   Returns an empty text when no targets were collected. */
text generate_alternate_return_targets() {
  text ral = text_undefined;

  if(!ENDP(set_of_labels_required_for_alternate_returns)) {
    list sl = NIL;
    FOREACH(entity, le, set_of_labels_required_for_alternate_returns) {
      sentence s1 = sentence_undefined;
      string str_continue = string_undefined;
      switch (get_prettyprint_language_tag()) {
        case is_language_fortran95:
        case is_language_fortran:
          str_continue = CONTINUE_FUNCTION_NAME;
          break;
        case is_language_c:
          str_continue = C_CONTINUE_FUNCTION_NAME;
          break;
        default:
          pips_internal_error("Language unknown !");
          break;
      }
      unformatted u1 =
          make_unformatted( strdup( label_local_name( le ) ),
                            STATEMENT_NUMBER_UNDEFINED,
                            0,
                            CONS(STRING, strdup(str_continue), NIL));
      s1 = make_sentence(is_sentence_unformatted, u1);
      sl = gen_nconc(sl, CONS(SENTENCE, s1, NIL));
    }
    ral = make_text(sl);
  }
  else {
    ral = make_text(NIL);
  }
  return ral;
}

/* words_regular_call used for user subroutine and user function and
   intrinsics called like user function such as MOD().
used also by library static_controlize */
list words_regular_call(call obj, bool is_a_subroutine, list pdl) {
  list pc = NIL;
  entity f = call_function(obj);
  value i = entity_initial(f);
  type t = entity_type(f);
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  /* Argument-less calls: labels and constants are printed by name only */
  if (call_arguments(obj) == NIL) {
    if (type_statement_p(t))
      /* Strip the internal label prefix (sizeof includes the NUL, hence
         the -1) */
      return (CHAIN_SWORD(pc, entity_local_name(f)+sizeof(LABEL_PREFIX) -1));
    if (value_constant_p(i) || value_symbolic_p(i)) {
      switch (get_prettyprint_language_tag()) {
        case is_language_fortran:
        case is_language_fortran95:
          return (CHAIN_SWORD(pc, entity_user_name(f)));
          break;
        case is_language_c:
          /* Logical constants are spelled differently in C */
          if (ENTITY_TRUE_P(f))
            return (CHAIN_SWORD(pc, "true"));
          if (ENTITY_FALSE_P(f))
            return (CHAIN_SWORD(pc, "false"));
          return (CHAIN_SWORD(pc, entity_user_name(f)));
          break;
        default:
          pips_internal_error("Language unknown !");
          break;
      }
    }
  }

  /* NOTE(review): despite its name, function_p is true when the callee
     result type is void, i.e. it is a procedure/subroutine */
  type calltype = call_compatible_type(entity_type(call_function(obj)));
  bool function_p = type_void_p(functional_result(type_functional(calltype)));

  if (function_p) {
    if (is_a_subroutine) {
      switch (get_prettyprint_language_tag()) {
        case is_language_fortran:
        case is_language_fortran95:
          pc = CHAIN_SWORD(pc, "CALL ");
          break;
        case is_language_c:
          /* No CALL keyword in C */
          pc = CHAIN_SWORD(pc, "");
          break;
        default:
          pips_internal_error("Language unknown !");
          break;
      }
    }
    else {
      switch (get_prettyprint_language_tag()) {
        case is_language_fortran:
          pips_user_warning("subroutine '%s' used as a function.\n",
                            entity_name(f));
          break;
        case is_language_c:
          // no warning in C
          break;
        case is_language_fortran95:
          pips_internal_error("Need to update F95 case");
          break;
        default:
          pips_internal_error("Language unknown !");
          break;
      }
    }
  }
  else if (is_a_subroutine) {
    switch (get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        pips_user_warning("function '%s' used as a subroutine.\n",
                          entity_name(f));
        pc = CHAIN_SWORD(pc, "CALL ");
        break;
      case is_language_c:
        // no warning in C
        pc = CHAIN_SWORD(pc, "");
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
    }
  }

  /* special cases for stdarg builtin macros */
  if (ENTITY_VA_END_P(f))
    pc = CHAIN_SWORD(pc, "va_end");
  else if (ENTITY_VA_START_P(f))
    pc = CHAIN_SWORD(pc, "va_start");
  else if (ENTITY_VA_COPY_P(f))
    pc = CHAIN_SWORD(pc, "va_copy");

  /* Special cases for stdio.h */
  /* else if (ENTITY__IO_GETC_P(f)) */
  /* pc = CHAIN_SWORD(pc, "getc"); */
  /* else if (ENTITY__IO_PUTC_P(f)) */
  /* pc = CHAIN_SWORD(pc, "putc"); */
  else if (ENTITY_ISOC99_SCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_SCANF_USER_FUNCTION_NAME);
  else if (ENTITY_ISOC99_FSCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_FSCANF_USER_FUNCTION_NAME);
  else if (ENTITY_ISOC99_SSCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_SSCANF_USER_FUNCTION_NAME);
  else if (ENTITY_ISOC99_VFSCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_VFSCANF_USER_FUNCTION_NAME);
  else if (ENTITY_ISOC99_VSCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_VSCANF_USER_FUNCTION_NAME);
  else if (ENTITY_ISOC99_VSSCANF_P(f))
    pc = CHAIN_SWORD(pc, ISOC99_VSSCANF_USER_FUNCTION_NAME);

  /* the implied complex operator is hidden... [D]CMPLX_(x,y) -> (x,y) */
  else if(!ENTITY_IMPLIED_CMPLX_P(f) && !ENTITY_IMPLIED_DCMPLX_P(f))
    pc = CHAIN_SWORD(pc, entity_user_name(f));

  /* The corresponding formal parameter cannot be checked by
     formal_label_replacement_p() because the called modules may not have
     been parsed yet. */
  if(!ENDP(call_arguments(obj))) {
    list pa = list_undefined;
    pc = CHAIN_SWORD(pc, "(");

    for(pa = call_arguments(obj); !ENDP(pa); POP(pa)) {
      expression eap = EXPRESSION(CAR(pa));
      if(get_bool_property("PRETTYPRINT_REGENERATE_ALTERNATE_RETURNS")
          && expression_call_p(eap) && actual_label_replacement_p(eap)) {
        /* Alternate return actual argument have been replaced by
           character strings by the parser. */
        entity cf = call_function(syntax_call(expression_syntax(eap)));
        const char* ls = entity_local_name(cf);
        /* strlen(ls) bytes suffice: the two stripped quotes leave room
           for the terminating NUL written below */
        string ls1 = malloc(strlen(ls));
        /* pips_assert("ls has at least four characters", strlen(ls)>=4); */

        /* Get rid of initial and final quotes */
        ls1 = strncpy(ls1, ls+1, strlen(ls)-2);
        *(ls1+strlen(ls)-2) = '\000';
        pips_assert("eap must be a call to a constant string",
                    expression_call_p(eap));
        if(strcmp(get_string_property("PARSER_SUBSTITUTE_ALTERNATE_RETURNS"),
                  "STOP")!=0) {
          pc = CHAIN_SWORD(pc, ls1);
          /* free(ls1); */
        }
        else {
          /* The actual label cannot always be used because it might have
             been eliminated as part of dead code by PIPS since it is not
             used with the STOP option. */
          if(label_string_defined_in_current_module_p(ls1+1)) {
            pc = CHAIN_SWORD(pc, ls1);
          }
          else {
            entity els1 = find_label_entity(get_current_module_name(), ls1+1);
            /* The assertion may be wrong if this piece of code is used to
               print intermediate statements */
            pips_assert("Label els1 has been defined although it is not used anymore",
                        !entity_undefined_p(els1));
            pc = CHAIN_SWORD(pc, ls1);
            add_target_to_alternate_return_set(els1);
          }
        }
      }
      else {
        /* words_expression cannot be called because of the C comma
           operator which require surrounding parentheses in this
           context. Be careful with unary minus. */
        pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(pa)),
                                               ASSIGN_OPERATOR_PRECEDENCE,
                                               true/*false*/,
                                               pdl));
      }
      if (CDR(pa) != NIL)
        pc = CHAIN_SWORD(pc, space_p? ", ": ",");
    }

    pc = CHAIN_SWORD(pc, ")");
  }
  else if(!type_void_p(functional_result(type_functional(t)))
          || !is_a_subroutine || prettyprint_language_is_c_p()) {
    /* Empty argument list still needs "()" for functions and C calls */
    pc = CHAIN_SWORD(pc, "()");
  }
  return pc;
}

/* To deal with attachment on user module usage.
 */
static list words_genuine_regular_call(call obj, bool is_a_subroutine, list pdl) {
  list pc = words_regular_call(obj, is_a_subroutine, pdl);

  if (call_arguments(obj) != NIL) {
    /* The call is not used to code a constant: */
    //entity f = call_function(obj);
    //type t = entity_type(f);
    /* The module name is the first one except if it is a procedure CALL:
       then "CALL " occupies the first word and the name is the second */
    if (type_void_p(functional_result(type_functional(call_compatible_type(entity_type(call_function(obj)))))))
      attach_regular_call_to_word(STRING(CAR(CDR(pc))), obj);
    else
      attach_regular_call_to_word(STRING(CAR(pc)), obj);
  }
  return pc;
}

/* Prettyprint an intrinsic called with the user-function syntax;
   precedence and position are irrelevant here */
list words_call_intrinsic(call obj,
                          int __attribute__ ((unused)) precedence,
                          bool __attribute__ ((unused)) leftmost,
                          list pdl) {
  return words_regular_call(obj, true, pdl);
}

/* Prettyprint an assignment or update-assignment call
   ("lhs op rhs", with op one of =, %=, &=, ^=, ...).
   Parenthesizes the whole expression when the context precedence
   requires it, or always when the precedence system is disabled. */
static list words_assign_op(call obj,
                            int precedence,
                            bool __attribute__ ((unused)) leftmost,
                            list pdl) {
  list pc = NIL, args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  const char* fun = entity_local_name(call_function(obj));

  /* Left-hand side */
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));

  /* Map internal update-operator names onto their C spelling */
  if (strcmp(fun, MODULO_UPDATE_OPERATOR_NAME) == 0)
    fun = "%=";
  else if (strcmp(fun, BITWISE_AND_UPDATE_OPERATOR_NAME) == 0)
    fun = "&=";
  else if (strcmp(fun, BITWISE_XOR_UPDATE_OPERATOR_NAME) == 0)
    fun = "^=";

  /* FI: space_p could be used here to control spacing around assignment */
  pc = CHAIN_SWORD(pc," ");
  pc = CHAIN_SWORD(pc, fun);
  pc = CHAIN_SWORD(pc," ");

  expression exp = expression_undefined;
  switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      exp = EXPRESSION(CAR(CDR(args)));
      if (expression_call_p(exp)) {
        /* = is not a Fortran operator. No need for parentheses ever,
           even with the parenthesis option */
        /* call c = syntax_call(expression_syntax(e));
           pc = gen_nconc(pc, words_call(c, 0, true, true, pdl)); */
        pc = gen_nconc(pc, words_syntax(expression_syntax(exp), pdl));
      }
      else
        pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(CDR(args))),
                                               prec, true, pdl));
      break;
    case is_language_c:
      /* Brace expressions are not allowed in standard assignments */
      exp = EXPRESSION(CAR(CDR(args)));
      if (ENTITY_ASSIGN_P(call_function(obj))) {
        if (brace_expression_p(exp)) {
          // use GCC constructor extension
          pips_internal_error("this should not happen: a constructor is represnetd as a cas on brace expression\n");
        }
        else {
          /* Be careful with expression lists, they may require
             surrounding parentheses. */
          pc = gen_nconc(pc, words_subexpression(exp, prec, true, pdl));
        }
      }
      else {
        pc = gen_nconc(pc, words_subexpression(exp, prec, true, pdl));
      }
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
  }

  /* Surrounding parentheses when the context binds tighter, or always
     when the precedence system is disabled */
  if (prec < precedence || (!precedence_p && precedence > 0)) {
    pc = CONS(STRING, MAKE_SWORD("("), pc);
    pc = CHAIN_SWORD(pc, ")");
  }
  return (pc);
}

/* Prettyprint a Fortran substring access "r(l:u)"; an unbounded upper
   bound (UNBOUNDED_DIMENSION_NAME) is simply omitted. */
static list words_substring_op(call obj,
                               int __attribute__ ((unused)) precedence,
                               bool __attribute__ ((unused)) leftmost,
                               list pdl) {
  /* The substring function call is reduced to a syntactic construct */
  list pc = NIL;
  expression r = expression_undefined;  /* the string reference */
  expression l = expression_undefined;  /* lower bound */
  expression u = expression_undefined;  /* upper bound */
  /* expression e = EXPRESSION(CAR(CDR(CDR(CDR(call_arguments(obj)))))); */
  int prec = words_intrinsic_precedence(obj);

  pips_assert("words_substring_op",
              gen_length(call_arguments(obj)) == 3
                  || gen_length(call_arguments(obj)) == 4);

  r = EXPRESSION(CAR(call_arguments(obj)));
  l = EXPRESSION(CAR(CDR(call_arguments(obj))));
  u = EXPRESSION(CAR(CDR(CDR(call_arguments(obj)))));

  pc = gen_nconc(pc, words_subexpression(r, prec, true, pdl));
  pc = CHAIN_SWORD(pc, "(");
  pc = gen_nconc(pc, words_subexpression(l, prec, true, pdl));
  pc = CHAIN_SWORD(pc, ":");

  /* An unknown upper bound is encoded as a call to
     UNBOUNDED_DIMENSION_NAME and nothing must be printed */
  if(syntax_call_p(expression_syntax(u))) {
    entity star = call_function(syntax_call(expression_syntax(u)));
    if(star!=CreateIntrinsic(UNBOUNDED_DIMENSION_NAME))
      pc = gen_nconc(pc, words_subexpression(u, prec, true, pdl));
  }
  else {
    pc = gen_nconc(pc, words_subexpression(u, prec, true, pdl));
  }
  pc = CHAIN_SWORD(pc, ")");
  return(pc);
}

/* Prettyprint a substring assignment "r(l:u) = e" (4th argument is the
   assigned value). */
static list words_assign_substring_op(call obj,
                                      int __attribute__ ((unused)) precedence,
                                      bool __attribute__ ((unused)) leftmost,
                                      list pdl) {
  /* The assign substring function call is reduced to a syntactic construct */
  list pc = NIL;
  expression e = expression_undefined;
  int prec = words_intrinsic_precedence(obj);

  pips_assert("words_substring_op", gen_length(call_arguments(obj)) == 4);

  e = EXPRESSION(CAR(CDR(CDR(CDR(call_arguments(obj))))));

  /* Reuse the substring printer for the left-hand side "r(l:u)" */
  pc = gen_nconc(pc, words_substring_op(obj, prec, true, pdl));
  pc = CHAIN_SWORD(pc, " = ");
  pc = gen_nconc(pc, words_subexpression(e, prec, true, pdl));

  return(pc);
}

/**
 * @return the external string representation of the operator
 * @param name, the pips internal representation of the operator
 */
static const char* renamed_op_handling (const char* name) {
  const char* result = name;

  /* C-specific internal names always map to their C spelling */
  if ( strcmp(result,PLUS_C_OPERATOR_NAME) == 0 )
    result = "+";
  else if ( strcmp(result, MINUS_C_OPERATOR_NAME) == 0 )
    result = "-";
  else if ( strcmp(result,BITWISE_AND_OPERATOR_NAME) == 0 )
    result = "&";
  else if ( strcmp(result,BITWISE_XOR_OPERATOR_NAME) == 0 )
    result = "^";
  else if ( strcmp(result,C_AND_OPERATOR_NAME) == 0 )
    result = "&&";
  else if ( strcmp(result,C_NON_EQUAL_OPERATOR_NAME) == 0 )
    result = "!=";
  else if ( strcmp(result,C_MODULO_OPERATOR_NAME) == 0 )
    result = "%";
  else if (prettyprint_language_is_c_p()) {
    /* Fortran-style relational/logical names are translated to C only
       when prettyprinting C */
    if(strcasecmp(result, GREATER_THAN_OPERATOR_NAME)==0)
      result=C_GREATER_THAN_OPERATOR_NAME;
    else if(strcasecmp(result, LESS_THAN_OPERATOR_NAME)==0)
      result=C_LESS_THAN_OPERATOR_NAME;
    else if(strcasecmp(result,GREATER_OR_EQUAL_OPERATOR_NAME)==0)
      result=C_GREATER_OR_EQUAL_OPERATOR_NAME;
    else if(strcasecmp(result,LESS_OR_EQUAL_OPERATOR_NAME)==0)
      result=C_LESS_OR_EQUAL_OPERATOR_NAME;
    else if(strcasecmp(result, EQUAL_OPERATOR_NAME) ==0)
      result=C_EQUAL_OPERATOR_NAME;
    else if(strcasecmp(result,NON_EQUAL_OPERATOR_NAME)==0)
      result= "!=";
    else if(strcasecmp(result,AND_OPERATOR_NAME)==0)
      result="&&";
    else if(strcasecmp(result, OR_OPERATOR_NAME)==0)
      result=C_OR_OPERATOR_NAME;
  }
  return result;
}

/** @return a list of string with the prettyprint of a omp reduction clause */
static list words_omp_red(call obj,
                          int precedence __attribute__ ((unused)),
                          bool leftmost __attribute__ ((unused)),
                          list pdl) {
  list result = NIL;
  entity fct = call_function(obj);
  result = CHAIN_SWORD(result, entity_user_name(fct));
  result = CHAIN_SWORD(result, "(");
  // the reduction arguments as an expression list
  list args = call_arguments (obj);
  pips_assert ("no arguments for reduction clause", args != NIL);
  int nb_arg = 0;
  FOREACH (EXPRESSION, arg, args) {
    if (nb_arg == 0) {
      // the first argument is an operator and need to be handle separately
      // because of the intenal management of operator
      const char* op;
      syntax syn = expression_syntax (arg);
      pips_assert ("should be a reference",
                   syntax_tag (syn) == is_syntax_reference);
      op = entity_local_name (reference_variable (syntax_reference (syn)));
      op = renamed_op_handling (op);
      /* NOTE(review): return value of CHAIN_SWORD is discarded here;
         this relies on in-place appending to the non-empty "result"
         list — confirm, or assign as done everywhere else */
      CHAIN_SWORD(result, op);
    } else { // (nb_arg != 0)
      // ":" separates the operator from the variable list, "," the variables
      result = (nb_arg == 1)? CHAIN_SWORD(result,":") : CHAIN_SWORD(result,",");
      result = gen_nconc (result, words_expression(arg, pdl));
    }
    nb_arg++;
  }
  pips_assert ("reduction clause has at least two arguments", nb_arg > 1);
  result = CHAIN_SWORD(result, ")");
  return result;
}

// Function written by C.A.
// Mensi, to prettyprint C or Fortran code as C code
/* Prettyprint "nullary" Fortran statements (STOP, PAUSE, RETURN, CONTINUE,
 * and OMP pragma markers) using C syntax.  Despite the name, these
 * statements may carry 0 or 1 argument; STOP and PAUSE with an argument are
 * mapped onto PIPS-specific runtime helpers (_f77_intrinsics_*). */
static list words_nullary_op_c(call obj, int precedence __attribute__ ((unused)), bool leftmost __attribute__ ((unused)), list pdl)
{
  list pc = NIL;
  list args = call_arguments(obj);
  entity func = call_function(obj);
  const char* fname = entity_local_name(func);
  int nargs = gen_length(args);
  bool parentheses_p=true;

  /* STOP and PAUSE and RETURN in Fortran may have 0 or 1 argument.
     STOP and PAUSE are prettyprinted in C using PIPS specific C
     functions. */
  if(nargs==0){
    if(same_string_p(fname,STOP_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "exit(0)");
    else if(same_string_p(fname,RETURN_FUNCTION_NAME)
            ||same_string_p(fname,C_RETURN_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "return");
    else if(same_string_p(fname,PAUSE_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "_f77_intrinsics_pause_(0)");
    else if(same_string_p(fname,CONTINUE_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "");
    else if ((same_string_p(fname,OMP_OMP_FUNCTION_NAME))
             || (same_string_p(fname,OMP_FOR_FUNCTION_NAME))
             || (same_string_p(fname,OMP_PARALLEL_FUNCTION_NAME)))
      pc = CHAIN_SWORD(pc, fname);
    else
      pips_internal_error("Unknown nullary operator");
  }
  else if(nargs==1){
    expression e = EXPRESSION(CAR(args));
    if(same_string_p(fname,STOP_FUNCTION_NAME)){
      basic b=expression_basic(e);
      if(basic_int_p(b)){
        // Missing: declaration of exit() if Fortran code handled
        pc = CHAIN_SWORD(pc, "exit");
      }
      else if(basic_string_p(b)){
        pc = CHAIN_SWORD(pc, "_f77_intrinsics_stop_");
      }
      /* NOTE(review): a STOP argument that is neither int nor string
       * leaves pc empty and only the parenthesized argument is printed —
       * confirm this case cannot occur. */
    }
    else if(same_string_p(fname,RETURN_FUNCTION_NAME)
            ||same_string_p(fname,C_RETURN_FUNCTION_NAME)){
      pc = CHAIN_SWORD(pc, "return");
      /* return value follows after a space, not in parentheses */
      parentheses_p = false;
      //pips_user_error("alternate returns are not supported in C\n");
    }
    else if(same_string_p(fname, PAUSE_FUNCTION_NAME)){
      pc = CHAIN_SWORD(pc, "_f77_intrinsics_pause_");
    }
    else {
      pips_internal_error("unexpected one argument");
    }
    pc = CHAIN_SWORD(pc, parentheses_p?"(":" ");
    pc = gen_nconc(pc, words_subexpression(e, precedence, true, pdl));
    pc = CHAIN_SWORD(pc, parentheses_p?")":"");
  }
  else {
    pips_internal_error("unexpected arguments");
  }
  return(pc);
}

// function added for fortran by A. Mensi
/* Prettyprint the same "nullary" statements in Fortran syntax: the keyword
 * itself, followed by an optional single argument after a space. */
static list words_nullary_op_fortran(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL;
  list args = call_arguments(obj);
  entity func = call_function(obj);
  const char* fname = entity_local_name(func);

  if(same_string_p(fname,RETURN_FUNCTION_NAME)
     ||same_string_p(fname,C_RETURN_FUNCTION_NAME))
    pc = CHAIN_SWORD(pc, RETURN_FUNCTION_NAME);
  else if (same_string_p(fname,OMP_FOR_FUNCTION_NAME))
    pc = CHAIN_SWORD(pc, "do"); /* OMP "for" becomes "do" in Fortran */
  else
    pc = CHAIN_SWORD(pc, fname);

  // STOP and PAUSE and RETURN in fortran may have 0 or 1 argument. A Mensi
  if(gen_length(args)==1) {
    if(same_string_p(fname,STOP_FUNCTION_NAME)
       || same_string_p(fname,PAUSE_FUNCTION_NAME)
       || same_string_p(fname,RETURN_FUNCTION_NAME)
       || same_string_p(fname, C_RETURN_FUNCTION_NAME)) {
      expression e = EXPRESSION(CAR(args));
      pc = CHAIN_SWORD(pc, " ");
      pc = gen_nconc(pc, words_subexpression(e, precedence, true, pdl));
    }
    else {
      pips_internal_error("unexpected arguments");
    }
  }
  else if(gen_length(args)>1) {
    pips_internal_error("unexpected arguments");
  }
  return(pc);
}

/* Language dispatcher for nullary statements: delegates to the Fortran or
 * C variant according to the current prettyprint language. */
static list words_nullary_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list result = NIL;
  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    result = words_nullary_op_fortran(obj, precedence, leftmost, pdl);
    break;
  case is_language_c:
    result = words_nullary_op_c(obj, precedence, leftmost, pdl);
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  return result;
}

/* Prettyprint a Fortran I/O control list ("KEYWORD= value" pairs) up to the
 * IO_LIST_STRING_NAME marker.  *iol is advanced in place: on return it
 * points at the remaining I/O list (after the marker) or NIL. */
static list words_io_control(list *iol, int __attribute__ ((unused)) precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL;
  list pio = *iol;

  /* The control list alternates keyword pseudo-calls and their values. */
  while (pio != NIL) {
    syntax s = expression_syntax(EXPRESSION(CAR(pio)));
    call c;
    if (! syntax_call_p(s)) {
      pips_internal_error("call expected");
    }
    c = syntax_call(s);
    if (strcmp(entity_local_name(call_function(c)), IO_LIST_STRING_NAME) == 0) {
      /* End of control list: hand the rest back to the caller. */
      *iol = CDR(pio);
      return(pc);
    }
    if (pc != NIL)
      pc = CHAIN_SWORD(pc, ",");
    pc = CHAIN_SWORD(pc, entity_local_name(call_function(c)));
    pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(CDR(pio))), pdl));
    pio = CDR(CDR(pio));
  }
  /* NOTE(review): pio is always NIL here, so this check is dead code. */
  if (pio != NIL)
    pips_internal_error("bad format");
  *iol = NIL;
  return(pc);
}

/* Prettyprint a Fortran implied-DO pseudo-call as
 * "(items..., index = range)".  First argument is the loop index, second
 * the range, the rest are the iterated items. */
static list words_implied_do(call obj, int __attribute__ ((unused)) precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL;
  list pcc;
  expression index;
  syntax s;
  range r;
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  pcc = call_arguments(obj);
  index = EXPRESSION(CAR(pcc));
  pcc = CDR(pcc);
  s = expression_syntax(EXPRESSION(CAR(pcc)));
  if (! syntax_range_p(s)) {
    pips_internal_error("range expected");
  }
  r = syntax_range(s);

  pc = CHAIN_SWORD(pc, "(");
  /* The iterated items, comma separated. */
  MAPL(pcp, {
    pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(pcp)), pdl));
    if (CDR(pcp) != NIL)
      pc = CHAIN_SWORD(pc, space_p? ", " : ",");
  }, CDR(pcc));
  pc = CHAIN_SWORD(pc, space_p? ", " : ",");
  pc = gen_nconc(pc, words_expression(index, pdl));
  pc = CHAIN_SWORD(pc, " = ");
  pc = gen_nconc(pc, words_loop_range(r, pdl));
  pc = CHAIN_SWORD(pc, ")");

  return(pc);
}

/* An unbounded (assumed-size) dimension is printed as "*". */
static list words_unbounded_dimension(call __attribute__ ((unused)) obj,
                                      int __attribute__ ((unused)) precedence,
                                      bool __attribute__ ((unused)) leftmost,
                                      list __attribute__ ((unused)) pdl)
{
  list pc = NIL;
  pc = CHAIN_SWORD(pc, "*");
  return(pc);
}

/* A list-directed format specifier is printed as "*". */
static list words_list_directed(call __attribute__ ((unused)) obj,
                                int __attribute__ ((unused)) precedence,
                                bool __attribute__ ((unused)) leftmost,
                                list __attribute__ ((unused)) pdl)
{
  list pc = NIL;
  pc = CHAIN_SWORD(pc, "*");
  return(pc);
}

/* Prettyprint a Fortran I/O statement (WRITE, READ, PRINT, OPEN, ...).
 * Recognizes the common "FMT=*" / "UNIT=*" case and rewrites WRITE(*,*)
 * as PRINT and READ(*,*) as READ * (or a PIPS runtime helper in C).
 * Otherwise the control list and the I/O list are printed explicitly. */
static list words_io_inst(call obj, int precedence, bool leftmost, list pdl)
{
  list pc = NIL;
  list pcio = call_arguments(obj);
  list pio_write = pcio;
  bool good_fmt = false;
  bool good_unit = false;
  bool iolist_reached = false;
  bool complex_io_control_list = false;
  expression fmt_arg = expression_undefined;
  expression unit_arg = expression_undefined;
  const char* called = entity_local_name(call_function(obj));
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  /* AP: I try to convert WRITE to PRINT. Three conditions must be
     fullfilled. The first, and obvious, one, is that the function has to
     be WRITE. Secondly, "FMT" has to be equal to "*". Finally, "UNIT" has
     to be equal either to "*" or "6". In such case, "WRITE(*,*)" is
     replaced by "PRINT *,". */
  /* GO: Not anymore for UNIT=6 leave it ... */
  while((pio_write != NIL) && (!iolist_reached)) {
    syntax s = expression_syntax(EXPRESSION(CAR(pio_write)));
    call c;
    expression arg = EXPRESSION(CAR(CDR(pio_write)));

    if(!syntax_call_p(s)) {
      pips_internal_error("call expected");
    }
    c = syntax_call(s);
    if(strcmp(entity_local_name(call_function(c)), "FMT=") == 0) {
      /* Avoid to use words_expression(arg) because it set some
         attachments and unit_words may not be used later... RK. */
      entity f;
      /* The * format is coded as a call to "LIST_DIRECTED_FORMAT_NAME"
         function: */
      good_fmt = syntax_call_p(expression_syntax(arg))
        && value_intrinsic_p(entity_initial(f = call_function(syntax_call(expression_syntax(arg)))))
        && (strcmp(entity_local_name(f), LIST_DIRECTED_FORMAT_NAME) == 0);
      pio_write = CDR(CDR(pio_write));
      /* To display the format later: */
      fmt_arg = arg;
    }
    else if(strcmp(entity_local_name(call_function(c)), "UNIT=") == 0) {
      /* Avoid to use words_expression(arg) because it set some
         attachments and unit_words may not be used later... RK. */
      entity f;
      /* The * format is coded as a call to "LIST_DIRECTED_FORMAT_NAME"
         function: */
      good_unit = syntax_call_p(expression_syntax(arg))
        && value_intrinsic_p(entity_initial(f = call_function(syntax_call(expression_syntax(arg)))))
        && (strcmp(entity_local_name(f), LIST_DIRECTED_FORMAT_NAME) == 0);
      /* To display the unit later: */
      unit_arg = arg;
      pio_write = CDR(CDR(pio_write));
    }
    else if(strcmp(entity_local_name(call_function(c)), IO_LIST_STRING_NAME) == 0) {
      iolist_reached = true;
      pio_write = CDR(pio_write);
    }
    else {
      /* Any other control keyword (ERR=, END=, ...) forces the generic
         control-list path below. */
      complex_io_control_list = true;
      pio_write = CDR(CDR(pio_write));
    }
  }

  if(good_fmt && good_unit && same_string_p(called, "WRITE")) {
    /* WRITE (*,*) -> PRINT * */
    if(pio_write != NIL) /* WRITE (*,*) pio -> PRINT *, pio */ {
      pc = CHAIN_SWORD(pc, "PRINT *, ");
    }
    else /* WRITE (*,*) -> PRINT * */ {
      pc = CHAIN_SWORD(pc, "PRINT * ");
    }
    pcio = pio_write;
  }
  else if(good_fmt && good_unit && same_string_p(called, "READ")) {
    /* READ (*,*) -> READ * */
    if(pio_write != NIL) /* READ (*,*) pio -> READ *, pio */ {
      switch(get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        pc = CHAIN_SWORD(pc, "READ *, ");
        break;
      case is_language_c:
        pc = CHAIN_SWORD(pc, "_f77_intrinsics_read_(");
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
      }
    }
    else /* READ (*,*) -> READ * */ {
      pc = CHAIN_SWORD(pc, "READ * ");
    }
    pcio = pio_write;
  }
  else if(!complex_io_control_list) {
    /* Only UNIT= and FMT= were seen: print "NAME (unit, fmt) ". */
    list unit_words = words_expression(unit_arg, pdl);
    pips_assert("A unit must be defined", !ENDP(unit_words));

    pc = CHAIN_SWORD(pc, entity_local_name(call_function(obj)));
    pc = CHAIN_SWORD(pc, " (");
    pc = gen_nconc(pc, unit_words);

    if(!expression_undefined_p(fmt_arg)) {
      /* There is a FORMAT: */
      pc = CHAIN_SWORD(pc, space_p? ", " : ",");
      pc = gen_nconc(pc, words_expression(fmt_arg, pdl));
    }

    pc = CHAIN_SWORD(pc, ") ");
    pcio = pio_write;
  }
  else {
    pc = CHAIN_SWORD(pc, entity_local_name(call_function(obj)));
    pc = CHAIN_SWORD(pc, " (");
    /* FI: missing argument; I use "precedence" because I've no clue;
       see LZ */
    pc = gen_nconc(pc, words_io_control(&pcio, precedence, leftmost, pdl));
    pc = CHAIN_SWORD(pc, ") ");
    /* free_words(fmt_words); */
  }

  /* because the "IOLIST=" keyword is embedded in the list and because
     the first IOLIST= has already been skipped, only odd elements are
     printed */
  MAPL(pp, {
    pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(pp)), pdl));
    if (CDR(pp) != NIL) {
      POP(pp);
      if(pp==NIL)
        pips_internal_error("missing element in IO list");
      pc = CHAIN_SWORD(pc, space_p? ", " : ",");
    }
  }, pcio);

  if(prettyprint_language_is_c_p())
    pc = CHAIN_SWORD(pc, ") "); /* close _f77_intrinsics_read_( */

  return (pc);
}

/**
 * Implemented for ALLOCATE(), but is applicable for every call to
 * function that take STAT= parameter
 *
 * Prints "NAME (arg, ..., STAT=expr) ", consuming the STAT= keyword
 * pseudo-call and its following value as a pair.
 */
static list words_stat_io_inst(call obj, int __attribute__((unused)) precedence,
                               bool __attribute__((unused)) leftmost, list pdl)
{
  list pc = NIL;
  list pcio = call_arguments(obj);
  list pio_write = pcio;
  const char* called = entity_local_name(call_function(obj));
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  /* Write call function */
  pc = CHAIN_SWORD(pc, called);
  pc = CHAIN_SWORD(pc, " (");

  while ( ( pio_write != NIL ) ) {
    expression expr = EXPRESSION(CAR(pio_write));
    syntax s = expression_syntax(expr);
    call c;
    if ( syntax_call_p(s) ) {
      /* STAT= is a call */
      c = syntax_call(s);
      if ( strcmp( entity_local_name( call_function(c) ), "STAT=" ) == 0 ) {
        /* We got it ! */
        pc = CHAIN_SWORD(pc, strdup("STAT=")); /* FIXME : strdup ? */
        /* get argument */
        pio_write = CDR(pio_write);
        expression arg = EXPRESSION(CAR(pio_write));
        pc = gen_nconc( pc, words_expression( arg, pdl ) );
      }
      /* NOTE(review): a call argument other than STAT= prints nothing —
       * confirm such arguments cannot appear here. */
    } else {
      /* It's not a call */
      pc = gen_nconc( pc, words_expression( expr, pdl ) );
    }
    pio_write = CDR(pio_write);
    if(pio_write) {
      pc = CHAIN_SWORD(pc, space_p? ", " : ",");
    }
  }
  pc = CHAIN_SWORD(pc, ") ");

  return ( pc );
}

/* Handler producing no output at all (placeholder entry). */
static list null(call __attribute__ ((unused)) obj,
                 int __attribute__ ((unused)) precedence,
                 bool __attribute__ ((unused)) leftmost,
                 list __attribute__ ((unused)) pdl)
{
  return(NIL);
}

/* Prettyprint a prefix unary operator (++, --, &, !, ~, *, unary +)
 * followed by its operand, with optional enclosing parentheses. */
static list words_prefix_unary_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL;
  expression e = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);
  const char* fun = entity_local_name(call_function(obj));

  if (strcmp(fun,PRE_INCREMENT_OPERATOR_NAME) == 0) fun = "++";
  else if (strcmp(fun,PRE_DECREMENT_OPERATOR_NAME) == 0) fun = "--";
  else if (strcmp(fun,ADDRESS_OF_OPERATOR_NAME) == 0) fun = "&";
  else if (strcmp(fun,C_NOT_OPERATOR_NAME) == 0) fun = "!";
  else if (strcmp(fun,BITWISE_NOT_OPERATOR_NAME) == 0) fun = "~";
  else if (strcmp(fun,DEREFERENCING_OPERATOR_NAME) == 0)
    /* Since we put no spaces around an operator (to not change Fortran),
       the blank before '*' is used to avoid the confusion in the case of
       divide operator, i.e d1 = 1.0 / *det in function inv_j, SPEC2000
       quake benchmark. But we do not want this in a lhs and espcially
       with a double dereferencing. */
    fun = "*";
  else if(prettyprint_language_is_c_p()){
    if(strcasecmp(fun, NOT_OPERATOR_NAME)==0) fun="!";
    if(strcasecmp(fun, UNARY_PLUS_OPERATOR_NAME)==0) {
      /* You do not want to transform +1 + +1 into +1++ 1 */
      /* Maybe the precedence could be useful to avoid adding a useless
         SPACE, but unary plus is rare enough to reduce the ROI of such
         an optimization to zero. */
      fun=" +";
    }
  }

  pc = CHAIN_SWORD(pc,fun);
  pc = gen_nconc(pc, words_subexpression(e, prec, false, pdl));

  if(prec < precedence || (!precedence_p && precedence>0)) {
    pc = CONS(STRING, MAKE_SWORD("("), pc);
    pc = CHAIN_SWORD(pc, ")");
  }
  return(pc);
}

/* Prettyprint a postfix unary operator (x++ or x--), with optional
 * enclosing parentheses. */
static list words_postfix_unary_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL;
  expression e = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);
  const char* fun = entity_local_name(call_function(obj));

  pc = gen_nconc(pc, words_subexpression(e, prec, false, pdl));

  if (strcmp(fun,POST_INCREMENT_OPERATOR_NAME) == 0) fun = "++";
  else if (strcmp(fun,POST_DECREMENT_OPERATOR_NAME) == 0) fun = "--";

  pc = CHAIN_SWORD(pc,fun);

  if(prec < precedence || (!precedence_p && precedence>0)) {
    pc = CONS(STRING, MAKE_SWORD("("), pc);
    pc = CHAIN_SWORD(pc, ")");
  }
  return(pc);
}

/* Prettyprint unary minus.  The "-" is glued onto the first word of the
 * operand so a line break can never separate them.  Parentheses are added
 * when precedence requires them or when the minus is not leftmost
 * (Fortran forbids e.g. x*-3 without parentheses). */
static list words_unary_minus(call obj, int precedence, bool leftmost, list pdl)
{
  list pc = NIL;
  expression e = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);

  if ( prec < precedence || !leftmost || (!precedence_p && precedence>0))
    pc = CHAIN_SWORD(pc, "(");
  /* make sure the minus can not be split apart from its argument */
  list sub = words_subexpression(e, prec, false, pdl);
  string fst = STRING(CAR(sub));
  POP(sub);
  string nfst ;
  asprintf(&nfst,"-%s",fst); /* prepend "-" onto the operand's first word */
  free(fst);
  sub=CONS(STRING,nfst,sub);
  pc = gen_nconc(pc, sub);
  if ( prec < precedence || !leftmost || (!precedence_p && precedence>0))
    pc = CHAIN_SWORD(pc, ")");

  return(pc);
}

/* The precedence of (1/x) is the same as the multiply operator
   (e.g. a*1/b without parentheses). Moreover, the MAXIMAL precedence is
   used for the (x) subterm (e.g. 1/(a*b) 1/(-2) ...). However, 1/x**2 may
   be a correct prettyprint in Fortran (?) */
/* WARNING : the floating point division is used wether b is an int or not
   ! (1.0/b) -- in fact b should not be an int !
*/
/* Prettyprint the INVERSE pseudo-operator as "1./x", parenthesizing the
 * whole term when needed; the operand is always printed with MAXIMAL
 * precedence so it carries its own parentheses. */
static list /* of string */ words_inverse_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list /* of string */ pc = NIL;
  expression e = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);

  if ( prec < precedence)
    pc = CHAIN_SWORD(pc, "(");
  pc = CHAIN_SWORD(pc, "1./");
  pc = gen_nconc(pc, words_subexpression(e, MAXIMAL_PRECEDENCE , false, pdl));
  if ( prec < precedence)
    pc = CHAIN_SWORD(pc, ")");

  return(pc);
}

/* This function is useful only for parsed codes since gotos are removed
   by the controlizer */
/* Prettyprint a goto to label tlabel.  A goto to the RETURN label becomes
 * a RETURN statement (in C, "return" or "return <value>;" depending on
 * whether the current module is void).  Other gotos are printed as GOTO
 * (Fortran) or "goto l<label>;" (C — labels cannot start with a digit, so
 * an "l" prefix is added for numeric labels). */
list /* of string */ words_goto_label(const char* tlabel)
{
  list pc = NIL;
  if (strcmp(tlabel, RETURN_LABEL_NAME) == 0) {
    /*<<<<<<< .working
      switch (get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        pc = CHAIN_SWORD(pc, RETURN_FUNCTION_NAME);
        break;
      case is_language_c:
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME);
        pc = CHAIN_SWORD(pc, ";");
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
      }
      =======*/
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      pc = CHAIN_SWORD(pc, RETURN_FUNCTION_NAME);
      break;
    case is_language_c: {
      entity f = get_current_module_entity();
      if(void_function_p(f)) {
        // FI: this hides the parsed code structure and is wrong
        // in C because a value may have to be returned;
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME);
        pc = CHAIN_SWORD(pc, ";");
      }
      else {
        entity rv = function_to_return_value(f);
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME" ");
        pc = CHAIN_SWORD(pc, entity_user_name(rv));
        pc = CHAIN_SWORD(pc, ";");
      }
      if(false) {
        // the gotos are maintained, but the final return must be printed out
        // FI: this would only work if the final return were printed
        // out for sure and with its label
        /* In C, a label cannot begin with a number so "l" is added for
           this case*/
        pc = CHAIN_SWORD(pc, strdup((isdigit(tlabel[0])?"goto l":"goto ")));
        pc = CHAIN_SWORD(pc, tlabel);
        pc = CHAIN_SWORD(pc, C_CONTINUE_FUNCTION_NAME);
      }
      break;
    }
    default:
      pips_internal_error("Language unknown !");
      break;
    }
    //>>>>>>> .merge-right.r18859
  }
  else {
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      pc = CHAIN_SWORD(pc, strdup("GOTO "));
      pc = CHAIN_SWORD(pc, tlabel);
      break;
    case is_language_c:
      /* In C, a label cannot begin with a number so "l" is added for
         this case*/
      pc = CHAIN_SWORD(pc, strdup((isdigit(tlabel[0])?"goto l":"goto ")));
      pc = CHAIN_SWORD(pc, tlabel);
      pc = CHAIN_SWORD(pc, C_CONTINUE_FUNCTION_NAME);
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
    }
  }
  return pc;
}

/* Common helper for fused multiply-add/sub: prints ((a*b)+c) when isadd is
 * true, ((a*b)-c) otherwise, using the precedences of "*" and "+" for the
 * subterms. */
static list eole_fmx_specific_op(call obj, int __attribute__ ((unused)) precedence, bool __attribute__ ((unused)) leftmost, bool isadd, list pdl)
{
  list /* of strings */ pc = NIL;
  list /* of expressions */ args = call_arguments(obj);
  int prec ;

  /* open parenthese one */
  pc = CHAIN_SWORD(pc, "(");
  /* open parenthese two */
  pc = CHAIN_SWORD(pc, "(");
  /* get precedence for mult operator */
  prec = intrinsic_precedence("*");
  /* first argument */
  pc = gen_nconc(pc,words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));
  /* mult operator */
  pc = CHAIN_SWORD(pc,"*");
  /* second argument */
  args = CDR(args);
  pc = gen_nconc(pc,words_subexpression(EXPRESSION(CAR(args)),prec,true, pdl));
  /* close parenthese two */
  pc = CHAIN_SWORD(pc, ")");
  /* get precedence for add operator */
  prec = intrinsic_precedence("+");
  /* add/sub operator */
  pc = CHAIN_SWORD(pc, isadd? "+": "-");
  /* third argument */
  args = CDR(args);
  pc = gen_nconc(pc,words_subexpression(EXPRESSION(CAR(args)),prec,false, pdl));
  /* close parenthese one */
  pc = CHAIN_SWORD(pc,")");

  return pc;
}

/* EOLE : The multiply-add operator is used within the optimize
   transformation ( JZ - sept 98) - fma(a,b,c) -> ((a*b)+c) */
list /* of string */ eole_fma_specific_op(call obj, int precedence, bool leftmost, list pdl)
{
  return eole_fmx_specific_op(obj, precedence, leftmost, true, pdl);
}

/* MULTIPLY-SUB operator: fms(a,b,c) -> ((a*b)-c) */
list /* of string */ eole_fms_specific_op(call obj, int precedence, bool leftmost, list pdl)
{
  return eole_fmx_specific_op(obj, precedence, leftmost, false, pdl);
}

/* Check if the given operator is associated with a special
   prettyprint. For instance, n-ary add and multiply operators which are
   used in the EOLE project use "+" and "*" prettyprints instead of the
   entity_local_name (JZ - sept 98) */
static const char* get_special_prettyprint_for_operator(call obj){
  static struct special_operator_prettyprint {
    const char * name;
    const char * op_prettyprint;
  } tab_operator_prettyprint[] = {
    {EOLE_SUM_OPERATOR_NAME,"+"},
    {EOLE_PROD_OPERATOR_NAME,"*"},
    {NULL,NULL}
  };
  int i = 0;
  const char* op_name;

  /* get the entity name */
  op_name = entity_local_name(call_function(obj));

  /* Linear scan of the (tiny, NULL-terminated) table. */
  while (tab_operator_prettyprint[i].name) {
    if (!strcmp(tab_operator_prettyprint[i].name,op_name))
      return tab_operator_prettyprint[i].op_prettyprint;
    else i++;
  }
  return op_name;
}

/* Prettyprint a brace (initializer) expression by temporarily wrapping the
 * call in an expression and delegating to words_brace_expression(). */
static list words_brace_op(call obj, int precedence __attribute__ ((unused)), bool leftmost __attribute__ ((unused)), list pdl)
{
  expression fake = call_to_expression(copy_call(obj));
  list l = words_brace_expression(fake, pdl);
  free_expression(fake); /* also frees the copied call */
  return l;
}

/* Extension of "words_infix_binary_op" function for nary operators used
   in the EOLE project - (since "nary" assumes operators with at least 2
   op) - JZ (Oct.
98)*/
/* Prettyprint an n-ary infix operator (EOLE n-ary sum/product): the first
 * operand, then the operator symbol and next operand repeatedly.  The "/"
 * and "-" special cases force full parenthesization of right operands so
 * that e.g. X - (Y - Z) keeps its parentheses. */
static list /* of string */ words_infix_nary_op(call obj, int precedence, bool leftmost, list pdl)
{
  list /*of string*/ pc = NIL;
  list /* of expressions */ args = call_arguments(obj);

  /* get current operator precedence */
  int prec = words_intrinsic_precedence(obj);

  expression exp1 = EXPRESSION(CAR(args));
  expression exp2;

  list we1 = words_subexpression(exp1, prec,
                                 prec>=MINIMAL_ARITHMETIC_PRECEDENCE? leftmost: true,
                                 pdl);
  list we2;

  /* open parenthese if necessary */
  if ( prec < precedence )
    pc = CHAIN_SWORD(pc, "(");
  pc = gen_nconc(pc, we1);

  /* reach the second arg */
  args = CDR(args);

  for(; args; args=CDR(args)) { /* for all args */
    exp2 = EXPRESSION(CAR(args));

    /*
     * If the infix operator is either "-" or "/", I prefer not to delete
     * the parentheses of the second expression.
     * Ex: T = X - ( Y - Z ) and T = X / (Y*Z)
     *
     * Lei ZHOU       Nov. 4 , 1991
     */
    if ( strcmp(entity_local_name(call_function(obj)), "/") == 0 )
      /* divide operator */
      we2 = words_subexpression(exp2, MAXIMAL_PRECEDENCE, false, pdl);
    else if ( strcmp(entity_local_name(call_function(obj)), "-") == 0 ) {
      /* minus operator */
      if ( expression_call_p(exp2) &&
           words_intrinsic_precedence(syntax_call(expression_syntax(exp2))) >=
           intrinsic_precedence("*") )
        /* precedence is greater than * or / */
        we2 = words_subexpression(exp2, prec, false, pdl);
      else
        we2 = words_subexpression(exp2, MAXIMAL_PRECEDENCE, false, pdl);
    }
    else {
      we2 = words_subexpression(exp2, prec,
                                prec<MINIMAL_ARITHMETIC_PRECEDENCE,
                                pdl);
    }

    /* operator prettyprint */
    pc = CHAIN_SWORD(pc, get_special_prettyprint_for_operator(obj));

    pc = gen_nconc(pc, we2);
  }
  /* close parenthese if necessary */
  if ( prec < precedence )
    pc = CHAIN_SWORD(pc, ")");

  return(pc);
}

/*
 * If the infix operator is either "-" or "/", I prefer not to delete
 * the parentheses of the second expression.
 * Ex: T = X - ( Y - Z ) and T = X / (Y*Z)
 *
 * Lei ZHOU       Nov.
4 , 1991 */
/* Prettyprint a binary infix operator.  The right operand is
 * parenthesized more aggressively than the left one ("-", "/", and
 * int-typed "*"-over-"/" cases, plus the general prec+1 rule) so that the
 * programmer's grouping such as a+(b+c) is preserved; C output also
 * inserts a space to avoid lexical ambiguities like "a+++b". */
static list words_infix_binary_op(call obj, int precedence, bool leftmost, list pdl)
{
  list pc = NIL;
  list args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  list we1 = words_subexpression(EXPRESSION(CAR(args)), prec,
                                 prec>=MINIMAL_ARITHMETIC_PRECEDENCE? leftmost: true,
                                 pdl);
  list we2;
  const char* fun = entity_local_name(call_function(obj));

  /* handling of internally renamed operators */
  fun = renamed_op_handling (fun);

  if(strcmp(fun, DIVIDE_OPERATOR_NAME) == 0) {
    /* Do we want to add a space in case we2 starts with a dereferencing
       operator "*"? Nga suggests to look at the quake benchmark of
       SPEC2000. */
    we2 = words_subexpression(EXPRESSION(CAR(CDR(args))), MAXIMAL_PRECEDENCE,
                              false, pdl);
  }
  else if (strcmp(fun, MINUS_OPERATOR_NAME) == 0 ) {
    expression exp = EXPRESSION(CAR(CDR(args)));
    if(expression_call_p(exp) &&
       words_intrinsic_precedence(syntax_call(expression_syntax(exp))) >=
       intrinsic_precedence(MULTIPLY_OPERATOR_NAME) )
      /* precedence is greater than * or / */
      we2 = words_subexpression(exp, prec, false, pdl);
    else
      we2 = words_subexpression(exp, MAXIMAL_PRECEDENCE, false, pdl);
  }
  else if(strcmp(fun, MULTIPLY_OPERATOR_NAME) == 0) {
    expression exp = EXPRESSION(CAR(CDR(args)));
    if(expression_call_p(exp) &&
       ENTITY_DIVIDE_P(call_function(syntax_call(expression_syntax(exp))))) {
      basic bexp = basic_of_expression(exp);
      if(basic_int_p(bexp)) {
        /* Integer a*(b/c) differs from (a*b)/c: keep the parentheses. */
        we2 = words_subexpression(exp, MAXIMAL_PRECEDENCE, false, pdl);
      }
      else
        we2 = words_subexpression(exp, prec, false, pdl);
      free_basic(bexp);
    }
    else
      we2 = words_subexpression(exp, prec, false, pdl);
  }
  else {
    /* If the operator in the second subexpression has the same priority
       as the current operator, it has to be parenthesized to respect the
       structure imposed by the programmer. For instance, a+(b+c) does
       require parentheses whereas (a+b)+c is the same as a+b+c. So we1
       and we2 cannot be processed exactly in the same way. */
    we2 = words_subexpression(EXPRESSION(CAR(CDR(args))), prec+1,
                              prec<MINIMAL_ARITHMETIC_PRECEDENCE,
                              pdl);
  }

  /* Use precedence to generate or not parentheses,
   * unless parentheses are always required */
  if(prec < precedence || (!precedence_p && precedence>0)) {
    pc = CHAIN_SWORD(pc, "(");
  }

  if(prettyprint_language_is_fortran95_p() && strcmp(fun, FIELD_OPERATOR_NAME) == 0) {
    /* NOTE(review): only the left operand is printed for the F95 field
       operator here — confirm the field name is part of we1. */
    pc = gen_nconc(pc, we1);
  }
  else if(prettyprint_language_is_c_p()) {
    /* Check that C ambiguities such as "a+++b" for "a++ + b" or
       "a + ++b" are not generated */
    if(strcmp(fun,"+")==0 || strcmp(fun, "-")==0) {
      pips_assert("left and right subexpressions are defined",
                  !ENDP(we1) && !ENDP(we2));
      string l = STRING(CAR(gen_last(we1)));
      string f = STRING(CAR(we2));
      char lc = *(l+strlen(l)-1);
      char fc = *f;
      string pre = "";
      string post = "";
      if(*fun==lc)
        pre = " ";
      if(*fun==fc)
        post = " ";
      pc = gen_nconc(pc, we1);
      pc = CHAIN_SWORD(pc, pre);
      pc = CHAIN_SWORD(pc, fun);
      pc = CHAIN_SWORD(pc, post);
      pc = gen_nconc(pc, we2);
    }
    else {
      pc = gen_nconc(pc, we1);
      pc = CHAIN_SWORD(pc, fun);
      pc = gen_nconc(pc, we2);
    }
  }
  else {
    pc = gen_nconc(pc, we1);
    pc = CHAIN_SWORD(pc, fun);
    pc = gen_nconc(pc, we2);
  }

  if(prec < precedence || (!precedence_p && precedence>0)) {
    pc = CHAIN_SWORD(pc, ")");
  }

  return(pc);
}

/* Nga Nguyen : this case is added for comma expression in C, but I am
   not sure about its precedence => to be looked at more carefully */
/* Prettyprint the C comma operator: operands separated by "," (or ", "
 * depending on the PRETTYPRINT_LISTS_WITH_SPACES property). */
static list words_comma_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL, args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  if(prec < precedence || !precedence_p)
    pc = CHAIN_SWORD(pc,"(");
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));
  while (!ENDP(CDR(args))) {
    pc = CHAIN_SWORD(pc,space_p?", " : ",");
    pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(CDR(args))), prec,
                                           true, pdl));
    args = CDR(args);
  }
  if(prec < precedence || !precedence_p)
    pc = CHAIN_SWORD(pc,")");
  return(pc);
}

/* Prettyprint the C conditional operator: "cond?then:else", with optional
 * enclosing parentheses. */
static list words_conditional_op(call obj, int precedence, bool __attribute__ ((unused)) leftmost, list pdl)
{
  list pc = NIL, args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);

  if(prec < precedence || !precedence_p)
    pc = CHAIN_SWORD(pc,"(");
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));
  pc = CHAIN_SWORD(pc,"?");
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(CDR(args))), prec, true, pdl));
  pc = CHAIN_SWORD(pc,":");
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(CDR(CDR(args)))), prec, true, pdl));
  if(prec < precedence || !precedence_p)
    pc = CHAIN_SWORD(pc,")");
  return(pc);
}

/* precedence needed here
 * According to the Precedence of Operators
 * Arithmetic > Character > Relational > Logical
 * Added by Lei ZHOU    Nov. 4,91
 *
 * A precedence is a integer in [0..MAXIMAL_PRECEDENCE]
 */
/* Dispatch table mapping each intrinsic name to its prettyprint handler
 * and precedence. */
static struct intrinsic_handler {
  const char * name;
  intrinsic_desc_t desc;
} tab_intrinsic_handler[] = {
  {BRACE_INTRINSIC, { words_brace_op, 31 } },

  {POWER_OPERATOR_NAME, { words_infix_binary_op, 30} },

  {CONCATENATION_FUNCTION_NAME, {words_infix_binary_op, 30} },

  /* The Fortran 77 standard does not allow x*-3 or x+-3, but this is
   * dealt with by argument leftmost, not by priorities. */
  {UNARY_MINUS_OPERATOR_NAME, { words_unary_minus, 25} },
  /* {"--", words_unary_minus, 19}, */

  {INVERSE_OPERATOR_NAME, { words_inverse_op, 21} },

  {PLUS_OPERATOR_NAME, { words_infix_binary_op, 20} },
  {MINUS_OPERATOR_NAME, { words_infix_binary_op, 20} },

  /* Non-arithmetic operators have priorities lesser than
   * MINIMAL_ARITHMETIC_PRECEDENCE; leftmost is restored to true for
   * unary minus.
*/ {LESS_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} }, {GREATER_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} }, {LESS_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} }, {GREATER_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} }, {EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} }, {NON_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} }, {NOT_OPERATOR_NAME, { words_prefix_unary_op, 9} }, {AND_OPERATOR_NAME, { words_infix_binary_op, 8} }, {OR_OPERATOR_NAME, { words_infix_binary_op, 6} }, {EQUIV_OPERATOR_NAME, { words_infix_binary_op, 3} }, {NON_EQUIV_OPERATOR_NAME, { words_infix_binary_op, 3} }, {ASSIGN_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} }, {ALLOCATE_FUNCTION_NAME, { words_stat_io_inst, 0} }, {DEALLOCATE_FUNCTION_NAME, { words_stat_io_inst, 0} }, {WRITE_FUNCTION_NAME, { words_io_inst, 0} }, {READ_FUNCTION_NAME, { words_io_inst, 0} }, {PRINT_FUNCTION_NAME, { words_io_inst, 0} }, {OPEN_FUNCTION_NAME, { words_io_inst, 0} }, {CLOSE_FUNCTION_NAME, { words_io_inst, 0} }, {INQUIRE_FUNCTION_NAME, { words_io_inst, 0} }, {BACKSPACE_FUNCTION_NAME, { words_io_inst, 0} }, {REWIND_FUNCTION_NAME, { words_io_inst, 0} }, {ENDFILE_FUNCTION_NAME, { words_io_inst, 0} }, {IMPLIED_DO_FUNCTION_NAME, { words_implied_do, 0} }, {RETURN_FUNCTION_NAME, { words_nullary_op,0} }, {C_RETURN_FUNCTION_NAME, { words_nullary_op,0} }, {PAUSE_FUNCTION_NAME, { words_nullary_op,0 } }, {STOP_FUNCTION_NAME, { words_nullary_op, 0} }, {CONTINUE_FUNCTION_NAME, { words_nullary_op,0} }, {END_FUNCTION_NAME, { words_nullary_op, 0} }, {FORMAT_FUNCTION_NAME, { words_prefix_unary_op, 0} }, {UNBOUNDED_DIMENSION_NAME, { words_unbounded_dimension, 0} }, {LIST_DIRECTED_FORMAT_NAME, { words_list_directed, 0} }, {SUBSTRING_FUNCTION_NAME, { words_substring_op, 0} }, {ASSIGN_SUBSTRING_FUNCTION_NAME, { words_assign_substring_op, 0} }, /* These operators are used within the optimize transformation in order to manipulate operators such as n-ary add and multiply or multiply-add 
     operators ( JZ - sept 98) */
  {EOLE_FMA_OPERATOR_NAME, { eole_fma_specific_op, MINIMAL_ARITHMETIC_PRECEDENCE } },
  {EOLE_FMS_OPERATOR_NAME, { eole_fms_specific_op, MINIMAL_ARITHMETIC_PRECEDENCE } },
  {EOLE_SUM_OPERATOR_NAME, { words_infix_nary_op, 20} },
  {EOLE_PROD_OPERATOR_NAME, { words_infix_nary_op, 21} },

  /* show IMA/IMS */
  {IMA_OPERATOR_NAME, { eole_fma_specific_op, MINIMAL_ARITHMETIC_PRECEDENCE } },
  {IMS_OPERATOR_NAME, { eole_fms_specific_op, MINIMAL_ARITHMETIC_PRECEDENCE } },

  /* 05/08/2003 - Nga Nguyen - Here are C intrinsics. The precedence is
     computed by using Table xx, page 49, book "The C programming language"
     of Kernighan and Ritchie, and by taking into account the precedence
     value of Fortran intrinsics. */

  /* C postfix operators */
  {FIELD_OPERATOR_NAME, { words_infix_binary_op, 30} },
  {POINT_TO_OPERATOR_NAME, { words_infix_binary_op, 30} },
  {POST_INCREMENT_OPERATOR_NAME, { words_postfix_unary_op, 30} },
  {POST_DECREMENT_OPERATOR_NAME, { words_postfix_unary_op, 30} },

  /* C prefix/unary operators */
  {PRE_INCREMENT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  {PRE_DECREMENT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  {ADDRESS_OF_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  {DEREFERENCING_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  {UNARY_PLUS_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  /*{"-unary", words_prefix_unary_op, 25},*/
  {BITWISE_NOT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
  {C_NOT_OPERATOR_NAME, { words_prefix_unary_op, 25} },

  /* What is the priority for CAST? 23?
*/
#define CAST_OPERATOR_PRECEDENCE (23)

  /* C multiplicative and additive operators */
  {C_MODULO_OPERATOR_NAME, { words_infix_binary_op, 22} },
  {MULTIPLY_OPERATOR_NAME, { words_infix_binary_op, 22} },
  {DIVIDE_OPERATOR_NAME, { words_infix_binary_op, 22} },
  {PLUS_C_OPERATOR_NAME, { words_infix_binary_op, 20} },
  {MINUS_C_OPERATOR_NAME, { words_infix_binary_op, 20} },

  /* C shift operators */
  {LEFT_SHIFT_OPERATOR_NAME, { words_infix_binary_op, 18} },
  {RIGHT_SHIFT_OPERATOR_NAME, { words_infix_binary_op, 18} },

  /* C relational and equality operators */
  {C_LESS_THAN_OPERATOR_NAME, { words_infix_binary_op, 15 } },
  {C_GREATER_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} },
  {C_LESS_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
  {C_GREATER_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
  {C_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 14} },
  {C_NON_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 14} },

  /* C bitwise and logical operators, by decreasing precedence */
  {BITWISE_AND_OPERATOR_NAME, { words_infix_binary_op, 13} },
  {BITWISE_XOR_OPERATOR_NAME, { words_infix_binary_op, 12} },
  {BITWISE_OR_OPERATOR_NAME, { words_infix_binary_op, 11} },
  {C_AND_OPERATOR_NAME, { words_infix_binary_op, 8} },
  {C_OR_OPERATOR_NAME, { words_infix_binary_op, 6} },

  /* C compound-assignment operators, all at assignment precedence */
  {MULTIPLY_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {DIVIDE_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {MODULO_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {PLUS_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {MINUS_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {LEFT_SHIFT_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {RIGHT_SHIFT_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {BITWISE_AND_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {BITWISE_XOR_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
  {BITWISE_OR_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },

  /* which precedence ? You are safe within an assignment.
*/
  {CONDITIONAL_OPERATOR_NAME, { words_conditional_op, ASSIGN_OPERATOR_PRECEDENCE+1} },

  /* which precedence ? You need parentheses within an assignment. */
  {COMMA_OPERATOR_NAME, { words_comma_op, ASSIGN_OPERATOR_PRECEDENCE-1} },

  /* OMP pragma function part */
  {OMP_OMP_FUNCTION_NAME, { words_nullary_op, 0} },
  {OMP_FOR_FUNCTION_NAME, { words_nullary_op, 0} },
  {OMP_PARALLEL_FUNCTION_NAME, { words_nullary_op, 0} },
  {OMP_REDUCTION_FUNCTION_NAME, { words_omp_red, 0} },

  /* Sentinel entry: terminates the table scan. */
  {NULL, { null, 0} }
};

/* Lazily-built map from intrinsic name to its prettyprint descriptor. */
static hash_table intrinsic_handlers = hash_table_undefined;

/* Populate intrinsic_handlers from tab_intrinsic_handler on first use.
 * Idempotent: does nothing once the table has been built. */
static void init_intrinsic_handlers() {
  if(hash_table_undefined_p(intrinsic_handlers)) {
    intrinsic_handlers = hash_table_make(hash_string,sizeof(tab_intrinsic_handler));
    for(struct intrinsic_handler *p = &tab_intrinsic_handler[0];p->name;p++) {
      // no copy because the memory is static
      hash_put(intrinsic_handlers,p->name,&p->desc);
    }
  }
}

/* after this call, name and desc are owned by intrinsic_handlers, but will
 * never be deallocated; they must point to permanent storage */
void register_intrinsic_handler(const char* name,intrinsic_desc_t *desc) {
  if(hash_table_undefined_p(intrinsic_handlers)) {
    init_intrinsic_handlers();
  }
  hash_put(intrinsic_handlers,name,desc);
}

/* Pretty-print a call to an intrinsic by dispatching to its registered
 * handler; unknown intrinsics fall back to the regular-call printer. */
static list words_intrinsic_call(call obj, int precedence, bool leftmost, list pdl) {
  if(hash_table_undefined_p(intrinsic_handlers)) {
    init_intrinsic_handlers();
  }
  const char *n = entity_local_name(call_function(obj));
  intrinsic_desc_t *d = hash_get(intrinsic_handlers,n);
  if(d!= HASH_UNDEFINED_VALUE)
    return d->f(obj, precedence, leftmost, pdl);
  else
    return words_regular_call(obj, false, pdl);
}

/* Precedence of the intrinsic named n; 0 when n is not a known intrinsic. */
static int intrinsic_precedence(const char* n) {
  if(hash_table_undefined_p(intrinsic_handlers)) {
    init_intrinsic_handlers();
  }
  intrinsic_desc_t *d = hash_get(intrinsic_handlers,n);
  if(d!= HASH_UNDEFINED_VALUE)
    return d->prec;
  else
    return 0;
}

/* Precedence of the intrinsic called by obj. */
static int words_intrinsic_precedence(call obj) {
  const char *n = entity_local_name(call_function(obj));
  return
intrinsic_precedence(n); } static list words_va_arg(list obj, list pdl) { list pc = NIL; expression e1 = sizeofexpression_expression(SIZEOFEXPRESSION(CAR(obj))); type t2 = sizeofexpression_type(SIZEOFEXPRESSION(CAR(CDR(obj)))); bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES"); pc = CHAIN_SWORD(pc,"va_arg("); pc = gen_nconc(pc, words_expression(e1, pdl)); pc = CHAIN_SWORD(pc, space_p? ", " : ","); pc = gen_nconc(pc, words_type(t2, pdl, false)); pc = CHAIN_SWORD(pc,")"); return pc; } /* exported for cmfortran.c */ list words_call( call obj, int precedence, bool leftmost, bool is_a_subroutine, list pdl) { list pc; entity f = call_function(obj); value i = entity_initial(f); if(value_intrinsic_p(i)) { int effective_precedence = (precedence_p||precedence<=1)? precedence : MAXIMAL_PRECEDENCE; pc = words_intrinsic_call(obj, effective_precedence, leftmost, pdl); } else pc = words_genuine_regular_call(obj, is_a_subroutine, pdl); return pc; } /* This one is exported. Outer parentheses are never useful. 
pdl can point to an empty list, but it must be freed on return */
list /* of string */ words_expression(expression obj, list pdl)
{
  /* An expression prints exactly as its syntax does. */
  return words_syntax(expression_syntax(obj), pdl);
}

/* exported for expression.c
 *
 * Dispatch the prettyprint of a syntax object on its tag.
 * Returns the list of words; aborts on an unexpected tag.
 */
list words_syntax(syntax obj, list pdl)
{
  list pc = NIL;

  switch (syntax_tag(obj)) {
  case is_syntax_reference :
    pc = words_reference(syntax_reference(obj), pdl);
    break;
  case is_syntax_range:
    pc = words_range(syntax_range(obj), pdl);
    break;
  case is_syntax_call:
    pc = words_call(syntax_call(obj), 0, true, false, pdl);
    break;
  case is_syntax_cast:
    pc = words_cast(syntax_cast(obj), 0, pdl);
    break;
  case is_syntax_sizeofexpression: {
    /* FI->SG: I do not know if in_type_declaration is true, false or a
       formal parameter */
    bool in_type_declaration = true;
    pc = words_sizeofexpression(syntax_sizeofexpression(obj),
                                in_type_declaration, pdl);
    break;
  }
  case is_syntax_subscript:
    pc = words_subscript(syntax_subscript(obj), pdl);
    break;
  case is_syntax_application:
    pc = words_application(syntax_application(obj), pdl);
    break;
  case is_syntax_va_arg:
    pc = words_va_arg(syntax_va_arg(obj), pdl);
    break;
  default:
    pips_internal_error("unexpected tag");
  }

  return(pc);
}

/* exported for cmfortran.c
 *
 * Like words_expression, but propagates the context precedence and the
 * leftmost flag to calls and casts so they can parenthesize correctly.
 */
list words_subexpression( expression obj, int precedence, bool leftmost, list pdl)
{
  list pc;

  if ( expression_call_p(obj) )
    pc = words_call(syntax_call(expression_syntax(obj)), precedence, leftmost, false, pdl);
  else if(expression_cast_p(obj)) {
    cast c = expression_cast(obj);
    pc = words_cast(c, precedence, pdl);
  }
  else
    pc = words_syntax(expression_syntax(obj), pdl);

  return pc;
}

/**************************************************************** SENTENCE */

/* Build the sentence that closes module e: "END" in Fortran, "}" in C,
 * and in Fortran 95 "END <construct> <name>" (see below). */
static sentence sentence_tail(entity e)
{
  sentence result = sentence_undefined;

  switch(get_prettyprint_language_tag()) {
  case is_language_fortran:
    result = MAKE_ONE_WORD_SENTENCE(0, "END");
    break;
  case is_language_c:
    result = MAKE_ONE_WORD_SENTENCE(0, "}");
    break;
  case is_language_fortran95: {
    /* In fortran 95, we want the end to be
followed by the type of construct and its name. */
    list pc = NIL;
    type te = entity_type(e);
    functional fe;
    type tr;

    pc = CHAIN_SWORD(pc,"END ");
    pips_assert("is functionnal", type_functional_p(te));
    if (static_module_p(e))
      pc = CHAIN_SWORD(pc,"static ");
    fe = type_functional(te);
    tr = functional_result(fe);

    /* Choose the construct keyword from the functional result type. */
    switch(type_tag(tr)) {
    case is_type_void:
      if (entity_main_module_p(e))
        pc = CHAIN_SWORD(pc,"PROGRAM ");
      else {
        if (entity_blockdata_p(e))
          pc = CHAIN_SWORD(pc, "BLOCKDATA ");
        else if (entity_f95module_p(e))
          pc = CHAIN_SWORD(pc, "MODULE ");
        else
          pc = CHAIN_SWORD(pc,"SUBROUTINE ");
      }
      break;
    case is_type_variable: {
      pc = CHAIN_SWORD(pc,"FUNCTION ");
      break;
    }
    case is_type_unknown:
      /*
       * For C functions with no return type.
       * It can be treated as of type int, but we keep it unknown
       * for the moment, to make the differences and to regenerate
       * initial code
       */
      break;
    default:
      pips_internal_error("unexpected type for result");
    }
    pc = CHAIN_SWORD(pc, entity_user_name(e));
    result = make_sentence(is_sentence_unformatted,
                           make_unformatted(NULL, 0, 0, pc));
    break;
  }
  default:
    pips_internal_error("Language unknown !");
    break;
  }

  return result;
}

/* exported for unstructured.c
 *
 * Build a one-line "GOTO tlabel" sentence.
 * label is the (optional) statement label carried by the sentence itself;
 * tlabel is the user-level target label; n is the statement number.
 */
sentence sentence_goto_label( entity __attribute__ ((unused)) module,
                              const char* label,
                              int margin,
                              const char* tlabel,
                              int n)
{
  list pc = words_goto_label(tlabel);

  return(make_sentence(is_sentence_unformatted,
                       make_unformatted(label?strdup(label):NULL, n, margin, pc)));
}

/* Build a "GOTO" sentence targeting the label of statement obj. */
static sentence sentence_goto(entity module,
                              const char* label,
                              int margin,
                              statement obj,
                              int n)
{
  /* Strip the internal label prefix to recover the user-level label. */
  const char* tlabel = entity_local_name(statement_label(obj))
    + sizeof(LABEL_PREFIX) -1;
  pips_assert("Legal label required", strlen(tlabel)!=0);
  return sentence_goto_label(module, label, margin, tlabel, n);
}

/* Build the text of a code block (a list of statements)

   @module is the module entity the code to display belong to

   @label is the label associated to the block

   @param margin is the indentation level

   @param objs is the list of
statements in the sequence to display @param n is the statement number of the sequence @pdl is the parser declaration list to track type declaration display in C @return the text of the block */ static text text_block(entity module, const char* label, int margin, list objs, int n, list pdl) { text r = make_text(NIL); if (ENDP(objs) && ! (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS") || get_bool_property("PRETTYPRINT_ALL_C_BLOCKS"))) return(r); if(!empty_string_p(label)) { pips_user_warning("Illegal label \"%s\". " "Blocks cannot carry a label\n", label); } /* "Unformatted" to be added at the beginning and at the end of a block: */ unformatted bm_beg = NULL; unformatted bm_end = NULL; // Test if block markers are required and set them: bool flg_marker = mark_block(&bm_beg, &bm_end, n, margin); // Print the begin block marker(s) if needed: if (flg_marker == true) ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, bm_beg)); else if ((get_bool_property("PRETTYPRINT_ALL_EFFECTS") || get_bool_property("PRETTYPRINT_BLOCKS")) && get_bool_property("PRETTYPRINT_FOR_FORESYS")) ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, strdup("C$BB\n"))); if (get_bool_property("PRETTYPRINT_ALL_C_BLOCKS")) { /* Since we generate new { }, we increment the margin for the nested statements: */ margin -= INDENTATION; if (margin < 0) margin = 0; ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "{{")); margin += INDENTATION; } // Append local variables if any: r = insert_locals (r); /* Now begin block markers and declarations have been printed, so print the block instructions: */ for (; objs != NIL; objs = CDR(objs)) { statement s = STATEMENT(CAR(objs)); text t = text_statement_enclosed(module, margin, s, false, true, pdl); text_sentences(r) = gen_nconc(text_sentences(r), text_sentences(t)); text_sentences(t) = NIL; free_text(t); } if (get_bool_property("PRETTYPRINT_ALL_C_BLOCKS")) { /* Get back to previous indentation: */ margin -= INDENTATION; 
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "}}")); margin += INDENTATION; } // Print the end block marker(s) if needed: if (flg_marker == true) ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, bm_end)); return r; } /* @return a list of string with the variable that need to be private in the * current context. The context takes care of the kind of output. For example * in the case of open mp the variables would be encapsulated into * the private() clause like this: private (a,b). * @param obj the loop to look at. */ static list /* of string */ loop_private_variables(loop obj, list pdl) { bool all_private = get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES"), hpf_private = pp_hpf_style_p(), omp_private = pp_omp_style_p(), some_before = false; list l = NIL; // list of local entities // In case of openmp the variable declared in the loop body should // not be made private, so ask for removing them from the list of locals. // If all_private is false -> remove loop indice from the list of locals. list locals = loop_private_variables_as_entites(obj, omp_private, !all_private); pips_debug(5, "#printed %zd/%zd\n", gen_length(l), gen_length(loop_locals(obj))); /* stuff around if not empty */ if (locals) { string private = string_undefined; if (hpf_private) { private = "NEW("; } else if (omp_private) { switch (get_prettyprint_language_tag()) { case is_language_fortran: private = "PRIVATE("; break; case is_language_c: private = "private("; break; case is_language_fortran95: pips_internal_error("Need to update F95 case"); break; default: pips_internal_error("Language unknown !"); break; } } else if(get_prettyprint_language_tag()==is_language_fortran) { /* This is debugging way to print out code. I do not know which Fortran parser takes this language extension. */ private = "PRIVATE "; } else { /* In C case, it might be a good idea to re-declare the private variables in the loop body, exceot for outer loop indices, but this is not easy here. 
PIPS data structures should be updated because loop_private is somehow redundant with statement declarations. */ pips_user_warning("Privatized variables are ignored with the " "current prettyprinter options.\n"); } if(!string_undefined_p(private)) { /* comma-separated list of private variables. * built in reverse order to avoid adding at the end... */ FOREACH (ENTITY, p, locals) { if (some_before) l = CHAIN_SWORD(l, ","); else some_before = true; /* from now on commas, triggered... */ l = gen_nconc(l, words_declaration(p, true, pdl)); } gen_free_list(locals); l = CONS(STRING, MAKE_SWORD(private), l); if (hpf_private || omp_private) CHAIN_SWORD(l, ")"); } } return l; } /* returns a formatted text for the HPF independent and new directive * well, no continuations and so, but the directives do not fit the * unformatted domain, because the directive prolog would not be well * managed there. */ static string marged( string prefix, int margin) { int len = strlen(prefix), i; string result = (string) malloc(strlen(prefix)+margin+1); strcpy(result, prefix); if(prettyprint_language_is_fortran_p()) { for (i=len; margin-->0;) { result[i++] = ' '; result[i]='\0'; } } return result; } static text text_directive(loop obj, /* the loop we're interested in */ int margin, string basic_directive, string basic_continuation, string parallel, list pdl) { string dir = marged(basic_directive, margin), cont = marged(basic_continuation, margin); text t = make_text(NIL); char buffer[100]; /* ??? */ list /* of string */l = NIL; bool is_hpf = pp_hpf_style_p(), is_omp = pp_omp_style_p(); bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES"); /* start buffer */ buffer[0] = '\0'; if (execution_parallel_p(loop_execution(obj))) { add_to_current_line(buffer, dir, cont, t); add_to_current_line(buffer, parallel, cont, t); l = loop_private_variables(obj, pdl); if (l && is_hpf) add_to_current_line(buffer, space_p ? 
", " : ",", cont, t); } else if (get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES")) { l = loop_private_variables(obj, pdl); if (l) { add_to_current_line(buffer, dir, cont, t); if (is_omp) { switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: add_to_current_line(buffer, "DO ", cont, t); break; case is_language_c: add_to_current_line(buffer, "for ", cont, t); break; default: pips_internal_error("Language unknown !"); break; } } } } if (strlen(buffer) > 0) MAP(STRING, s, add_to_current_line(buffer, s, cont, t), l); /* what about reductions? should be associated to the ri somewhere. */ close_current_line(buffer, t, cont); free(dir); free(cont); return t; } #define HPF_SENTINEL "!HPF$" #define HPF_DIRECTIVE HPF_SENTINEL " " #define HPF_CONTINUATION HPF_SENTINEL "x" #define HPF_INDEPENDENT "INDEPENDENT" static text text_hpf_directive(loop l, int m) { list pdl = NIL; // pdl is useless in Fortran text t = text_directive(l, m, "\n" HPF_DIRECTIVE, HPF_CONTINUATION, HPF_INDEPENDENT, pdl); return t; } #define OMP_SENTINEL "!$OMP" #define OMP_DIRECTIVE OMP_SENTINEL " " #define OMP_CONTINUATION OMP_SENTINEL "x" #define OMP_PARALLELDO "PARALLEL DO " #define OMP_C_SENTINEL "#pragma omp" #define OMP_C_DIRECTIVE OMP_C_SENTINEL " " #define OMP_C_CONTINUATION OMP_C_SENTINEL "x" #define OMP_C_PARALLELDO "parallel for " text text_omp_directive(loop l, int m) { list pdl = NIL; // pdl is useless in Fortran text t = text_undefined; switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: t = text_directive(l, m, "\n" OMP_DIRECTIVE, OMP_CONTINUATION, OMP_PARALLELDO, pdl); break; case is_language_c: // text_directive function takes care of private variables // More should be done to take care of shared variables, reductions // and other specific omp clause like lastprivate, copyin ... 
t = text_directive(l, m, OMP_C_DIRECTIVE, OMP_C_CONTINUATION, OMP_C_PARALLELDO, pdl); break; default: pips_internal_error("Language unknown !"); break; } return t; } /* exported for fortran90.c */ text text_loop_default(entity module, const char* label, int margin, loop obj, int n, list pdl) { list pc = NIL; sentence first_sentence = sentence_undefined; unformatted u; text r = make_text(NIL); statement body = loop_body( obj ); entity the_label = loop_label(obj); const char* do_label = entity_local_name(the_label) + sizeof(LABEL_PREFIX) -1; bool structured_do = entity_empty_label_p(the_label); bool doall_loop_p = false; bool hpf_prettyprint = pp_hpf_style_p(); bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT"); bool all_private = get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES"); bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p; if (execution_sequential_p(loop_execution(obj))) { doall_loop_p = false; } else { doall_loop_p = pp_doall_style_p(); } /* HPF directives before the loop if required (INDEPENDENT and NEW) */ if (hpf_prettyprint) MERGE_TEXTS(r, text_hpf_directive(obj, margin)); /* idem if Open MP directives are required */ if (pp_omp_style_p()) MERGE_TEXTS(r, text_omp_directive(obj, margin)); /* LOOP prologue. */ switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pc = CHAIN_SWORD(NIL, (doall_loop_p) ? "DOALL " : "DO " ); if (!structured_do && !doall_loop_p && !do_enddo_p) { pc = CHAIN_SWORD(pc, concatenate(do_label, " ", NULL)); } break; case is_language_c: pc = CHAIN_SWORD(NIL, (doall_loop_p) ? 
"forall(" : "for(" ); break; default: pips_internal_error("Language unknown !"); break; } //pc = CHAIN_SWORD(pc, entity_local_name(loop_index(obj))); pc = CHAIN_SWORD(pc, entity_user_name(loop_index(obj))); pc = CHAIN_SWORD(pc, " = "); switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pc = gen_nconc(pc, words_loop_range(loop_range(obj), pdl)); u = make_unformatted(strdup(label), n, margin, pc); ADD_SENTENCE_TO_TEXT(r, first_sentence = make_sentence(is_sentence_unformatted, u)); break; case is_language_c: pc = gen_nconc(pc, C_loop_range(loop_range(obj), loop_index(obj), pdl)); if (braces_p) pc = CHAIN_SWORD(pc," {"); if ((label != NULL) && (label[0] != '\0')) { pips_debug(9, "the label %s need to be print for a for C loop", label); u = make_unformatted(strdup(label), 0, 0, NULL); ADD_SENTENCE_TO_TEXT(r, first_sentence = make_sentence(is_sentence_unformatted, u)); } u = make_unformatted(NULL, n, margin, pc); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); break; default: pips_internal_error("Language unknown !"); break; } /* builds the PRIVATE scalar declaration if required */ if (!ENDP(loop_locals(obj)) && (doall_loop_p || all_private) && !hpf_prettyprint) { list /* of string */lp = loop_private_variables(obj, pdl); // initialize the local variable text if needed if ((local_flg == false) && (lp)) { local_flg = true; local_var = make_text(NIL); } if (lp) /* local_var is a global variable which is exploited later... */ /* FI: I do not understand why the local declarations were not added right away. I hope my change (simplification) does not break something else that is not tested by our non-regression suite. 
*/ if (!pp_omp_style_p()) { ADD_SENTENCE_TO_TEXT // ( local_var, ( r, make_sentence(is_sentence_unformatted, make_unformatted(NULL, 0, margin+INDENTATION, lp))); } } /* loop BODY */ MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, body, !one_liner_p(body), !one_liner_p(body), pdl)); /* LOOP postlogue */ switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: if (structured_do || doall_loop_p || do_enddo_p || pp_cray_style_p() || pp_craft_style_p() || pp_cmf_style_p()) { ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDDO")); } break; case is_language_c: if (braces_p) ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); break; default: pips_internal_error("Language unknown !"); break; } attach_loop_to_sentence_up_to_end_of_text(first_sentence, r, obj); return r; } /* exported for conversion/look_for_nested_loops.c */ text text_loop( entity module, const char* label, int margin, loop obj, int n, list pdl) { text r = make_text(NIL); statement body = loop_body( obj ) ; entity the_label = loop_label(obj); const char* do_label = entity_local_name(the_label)+sizeof(LABEL_PREFIX) -1; bool structured_do = entity_empty_label_p(the_label); bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT"); /* small hack to show the initial label of the loop to name it... */ if(!structured_do && do_enddo_p) { ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, strdup(concatenate("! INITIALLY: DO ", do_label, "\n", NULL)))); } /* quite ugly management of other prettyprints... 
*/ switch(execution_tag(loop_execution(obj)) ) { case is_execution_sequential: MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl)); break ; case is_execution_parallel: if (pp_cmf_style_p()) { text aux_r; if((aux_r = text_loop_cmf(module, label, margin, obj, n, NIL, NIL)) != text_undefined) { MERGE_TEXTS(r, aux_r); } } else if (pp_craft_style_p()) { text aux_r; if((aux_r = text_loop_craft(module, label, margin, obj, n, NIL, NIL)) != text_undefined) { MERGE_TEXTS(r, aux_r); } } else if (pp_f90_style_p()) { instruction bi = statement_instruction(body); // body instruction bool success_p = false; if(instruction_assign_p(bi) ) { MERGE_TEXTS(r, text_loop_90(module, label, margin, obj, n)); success_p = true; } else if(instruction_sequence_p(bi)) { list sl = sequence_statements(instruction_sequence(bi)); if(gen_length(sl)==1) { statement ibs = STATEMENT(CAR(sl)); instruction ibi = statement_instruction(ibs); if(instruction_assign_p(ibi) ) { MERGE_TEXTS(r, text_loop_90(module, label, margin, obj, n)); success_p = true; } } } if(!success_p) { MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl)); } } else { MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl)); } break ; default: pips_internal_error("Unknown tag") ; } return r; } static text text_whileloop(entity module, const char* label, int margin, whileloop obj, int n, list pdl) { list pc = NIL; sentence first_sentence; unformatted u; text r = make_text(NIL); statement body = whileloop_body( obj ); entity the_label = whileloop_label(obj); const char* do_label = entity_local_name(the_label) + sizeof(LABEL_PREFIX) -1; bool structured_do = entity_empty_label_p(the_label); bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT"); evaluation eval = whileloop_evaluation(obj); /* Show the initial label of the loop to name it... * FI: I believe this is useless for while loops since they cannot * be parallelized. 
*/ if(!structured_do && do_enddo_p) { ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, strdup(concatenate("! INITIALLY: DO ", do_label, "\n", NULL)))); } if(evaluation_before_p(eval)) { switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: /* LOOP prologue. */ pc = CHAIN_SWORD(NIL, "DO " ); if(!structured_do && !do_enddo_p) { pc = CHAIN_SWORD(pc, concatenate(do_label, " ", NULL)); } pc = CHAIN_SWORD(pc, "WHILE ("); pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl)); pc = CHAIN_SWORD(pc, ")"); u = make_unformatted(strdup(label), n, margin, pc); ADD_SENTENCE_TO_TEXT(r, first_sentence = make_sentence(is_sentence_unformatted, u)); /* loop BODY */ MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl)); /* LOOP postlogue */ if(structured_do) { ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDDO")); } break; case is_language_c: { bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p; if(!braces_p) { pc = CHAIN_SWORD(NIL,"while ("); pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl)); pc = CHAIN_SWORD(pc,")"); u = make_unformatted(strdup(label), n, margin, pc); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, body, !one_liner_p(body), !one_liner_p(body), pdl)); //if (structured_do) //ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); } else { pc = CHAIN_SWORD(NIL,"while ("); pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl)); pc = CHAIN_SWORD(pc,") {"); u = make_unformatted(strdup(label), n, margin, pc); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl)); if(structured_do) ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); } } break; default: pips_internal_error("Language unknown !"); break; } } else { pips_assert ("Only C language is managed 
here", prettyprint_language_is_c_p()); /* C do { s; } while (cond); loop*/ pc = CHAIN_SWORD(NIL,"do {"); u = make_unformatted(strdup(label), n, margin, pc); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl)); ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); pc = CHAIN_SWORD(NIL,"while ("); pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl)); pc = CHAIN_SWORD(pc, ");"); u = make_unformatted(NULL, n, margin, pc); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); } /* attach_loop_to_sentence_up_to_end_of_text(first_sentence, r, obj); */ return r; } /* exported for unstructured.c */ text init_text_statement( entity module, int margin, statement obj) { instruction i = statement_instruction(obj); text r; if (get_bool_property("PRETTYPRINT_ALL_EFFECTS") || !((instruction_block_p(i) && !get_bool_property("PRETTYPRINT_BLOCKS")) || (instruction_unstructured_p(i) && !get_bool_property("PRETTYPRINT_UNSTRUCTURED")))) { /* FI: before calling the hook, * statement_ordering(obj) should be checked */ r = (*text_statement_hook)( module, margin, obj ); if (text_statement_hook != empty_text) attach_decoration_to_text(r); } else r = make_text( NIL ) ; if (get_bool_property("PRETTYPRINT_ALL_EFFECTS") || get_bool_property("PRETTYPRINT_STATEMENT_ORDERING")) { char *buffer; int so = statement_ordering(obj) ; if (!(instruction_block_p(statement_instruction(obj)) && (! 
get_bool_property("PRETTYPRINT_BLOCKS")))) {
      /* Emit the "(ordering,statement)" decoration as a comment line. */
      if (so != STATEMENT_ORDERING_UNDEFINED)
        asprintf(&buffer, "%s (%d,%d)\n", get_comment_sentinel(),
                 ORDERING_NUMBER(so), ORDERING_STATEMENT(so));
      else
        asprintf(&buffer, "%s (statement ordering unavailable)\n",
                 get_comment_sentinel());
      ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, buffer));
    }
  }
  return( r ) ;
}

/* Pretty-print a logical IF: the condition followed by its true branch,
 * on one line in Fortran ("IF (cond) call"), or as "if (cond)" plus an
 * indented statement in C.
 *
 * NOTE(review): module is declared __attribute__ ((unused)) but it IS
 * read in the C branch below (text_statement call); the attribute looks
 * stale (it is harmless with GCC, which only silences the warning).
 */
static text text_logical_if(entity __attribute__ ((unused)) module,
                            const char* label,
                            int margin,
                            test obj,
                            int n,
                            list pdl)
{
  text r = make_text(NIL);
  list pc = NIL;
  statement tb = test_true(obj);

  /* Opening keyword according to the output language. */
  switch(get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    pc = CHAIN_SWORD(pc, strdup("IF ("));
    break;
  case is_language_c:
    pc = CHAIN_SWORD(pc, strdup("if ("));
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));

  instruction ti = instruction_undefined;
  call c = call_undefined;
  text t = text_undefined;
  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    /* Fortran: the true branch is a call printed on the same line. */
    pc = CHAIN_SWORD(pc, ") ");
    ti = statement_instruction(tb);
    c = instruction_call(ti);
    pc = gen_nconc(pc, words_call(c, 0, true, true, pdl));
    ADD_SENTENCE_TO_TEXT(r,
                         make_sentence(is_sentence_unformatted,
                                       make_unformatted(strdup(label), n, margin, pc)));
    break;
  case is_language_c:
    pc = CHAIN_SWORD(pc, ")"); // Do not add a useless SPACE
    /* C: print the true branch as an indented statement of its own. */
    t = text_statement(module, margin + INDENTATION, tb, pdl);
    ADD_SENTENCE_TO_TEXT(r,
                         make_sentence(is_sentence_unformatted,
                                       make_unformatted(strdup(label), n, margin, pc)));
    /* Steal the sentences of t instead of copying them, then free t. */
    text_sentences(r) = gen_nconc(text_sentences(r), text_sentences(t));
    text_sentences(t) = NIL;
    free_text(t);
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }

  ifdebug(8) {
    fprintf(stderr, "logical_if=================================\n");
    print_text(stderr, r);
    fprintf(stderr, "==============================\n");
  }
  return (r);
}

static bool test_with_no_else_clause_p(test t) {
  bool
no_else_p = true;
  /* NOTE(review): an empty false branch yields false here, which reads as
   * inverted with respect to the function name; the caller below negates
   * the result, so confirm the intended contract before renaming. */
  if(empty_statement_p(test_false(t)))
    no_else_p = false;
  else {
    statement fs = test_false(t);
    if(statement_test_p(fs))
      // Go down recursively
      no_else_p = test_with_no_else_clause_p(statement_test(fs));
    else
      no_else_p = true;
  }
  return no_else_p;
}

/* Some code shared by text_block_if and text_block_ifthen */
static bool test_with_dangling_else_p(test t)
{
  statement fb = test_false(t);
  bool outer_else_p = !nop_statement_p(fb); // obj contains a non-empty else clause

  /* Do we have a test as a true branch, a test with no else clause? */
  statement ts = effective_test_true(t);
  bool inner_test_p = statement_test_p(ts);
  bool inner_else_p = inner_test_p?
    test_with_no_else_clause_p(statement_test(ts)) : false;
  bool dangling_else_p = inner_test_p && outer_else_p && !inner_else_p;

  return dangling_else_p;
}

/* Prettyprint the condition, the true and, possibly, the false branch.
 *
 * Manage redundant braces in C according to either the standard, or
 * gcc guidelines or a request to print them all.
 *
 * Brace management is a bit complex because the closing brace of the
 * true block may be printed with the else branch or as a final brace
 * when the else branch is empty.
 */
static text text_block_if(entity module,
                          const char* label,
                          int margin,
                          test obj,
                          int n,
                          list pdl)
{
  text r = make_text(NIL);
  list pc = NIL;
  statement test_false_obj;
  bool one_liner_true_statement_p = one_liner_p(test_true(obj));
  bool one_liner_false_statement_p = one_liner_p(test_false(obj));
  //bool else_branch_p = false;
  /* Is the else branch empty?
*/ bool dangling_else_p = test_with_dangling_else_p(obj); bool true_braces_p = !one_liner_true_statement_p || dangling_else_p || gcc_if_block_braces_required_p(obj) || prettyprint_all_c_braces_p; bool final_braces_p = true_braces_p; /* Prettyprint the condition and the true branch */ switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pc = CHAIN_SWORD(pc, "IF ("); pc = gen_nconc(pc, words_expression(test_condition(obj), pdl)); pc = CHAIN_SWORD(pc, ") THEN"); break; case is_language_c: pc = CHAIN_SWORD(pc, "if ("); pc = gen_nconc(pc, words_expression(test_condition(obj), pdl)); if(true_braces_p) pc = CHAIN_SWORD(pc, ") {"); else pc = CHAIN_SWORD(pc, ")"); break; default: pips_internal_error("Language unknown !"); break; } ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, make_unformatted(strdup(label), n, margin, pc))); MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, test_true(obj), !one_liner_true_statement_p, !one_liner_true_statement_p, pdl)); /* Prettyprint the false branch if it is useful */ test_false_obj = test_false(obj); if(statement_undefined_p(test_false_obj)) { pips_internal_error("undefined statement"); } if(!statement_with_empty_comment_p(test_false_obj) || (!empty_statement_p(test_false_obj) && !continue_statement_p(test_false_obj)) || (empty_statement_p(test_false_obj) && (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS"))) || (continue_statement_p(test_false_obj) && (get_bool_property("PRETTYPRINT_ALL_LABELS")))) { //else_branch_p = true; switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ELSE")); break; case is_language_c: if(true_braces_p) { ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); final_braces_p = false; } /* FI: I am not sure this test is safe and that no dangling else can occur */ if(one_liner_false_statement_p && !prettyprint_all_c_braces_p) { 
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else")); final_braces_p = false; } else { ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else {")); final_braces_p = true; } break; default: pips_internal_error("Language unknown !"); break; } MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, test_false_obj, pdl)); } /* Prettyprint the closing of the test */ switch (get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,strdup("ENDIF"))); break; case is_language_c: if(final_braces_p) ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,strdup("}"))); break; default: pips_internal_error("Language unknown !"); break; } ifdebug(8) { fprintf(stderr, "text_block_if=================================\n"); print_text(stderr, r); fprintf(stderr, "==============================\n"); } return (r); } static text text_io_block_if(entity module, const char* label, int margin, test obj, int n, list pdl) { text r = make_text(NIL); list pc = NIL; if (!empty_statement_p(test_true(obj))) { char* label_local_name = new_label_local_name(module); char* strglab= label_local_name + 1; r = make_text(CONS(SENTENCE, sentence_goto_label(module, label, margin, strglab, n), NIL)); ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, make_unformatted(strdup(label), n, margin, pc))); MERGE_TEXTS(r, text_statement(module, margin, test_true(obj), pdl)); string str = string_undefined; switch (get_prettyprint_language_tag()) { case is_language_fortran: str = strdup(CONTINUE_FUNCTION_NAME); break; case is_language_c: str = strdup(C_CONTINUE_FUNCTION_NAME); break; case is_language_fortran95: pips_internal_error("Need to update F95 case"); break; default: pips_internal_error("Language unknown !"); break; } ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, make_unformatted(strdup(strglab), n, margin, CONS(STRING, str, NIL)))); free(label_local_name); } if 
(!empty_statement_p(test_false(obj)))
    MERGE_TEXTS(r, text_statement(module, margin, test_false(obj), pdl));
  return (r);
}

/* Prettyprint a test when its false branch is empty */
static text text_block_ifthen(entity module,
                              const char* label,
                              int margin,
                              test obj,
                              int n,
                              list pdl) {
  text r = make_text(NIL);
  list pc = NIL;
  statement tb = test_true(obj);
  bool dangling_else_p = test_with_dangling_else_p(obj);
  bool braces_p = !one_liner_p(tb) // several statement in the true branch
    || prettyprint_all_c_braces_p // use request for braces
    || dangling_else_p; // else clause would be associated to the wrong if

  /* Open the condition; in C, append "{" when braces are required */
  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    pc = CHAIN_SWORD(pc, "IF (");
    pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
    pc = CHAIN_SWORD(pc, ") THEN");
    break;
  case is_language_c:
    pc = CHAIN_SWORD(pc, "if (");
    pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
    pc = CHAIN_SWORD(pc, (!braces_p?")":") {"));
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  ADD_SENTENCE_TO_TEXT(r,
                       make_sentence(is_sentence_unformatted,
                                     make_unformatted(strdup(label), n, margin, pc)));
  MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, tb,
                                         braces_p, braces_p, pdl));
  /* The caller prints any Fortran ENDIF; only the C brace is closed here */
  if (prettyprint_language_is_c_p() && braces_p)
    ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
  return (r);
}

/* Prettyprint the else clause of a test, unless it is an uninteresting
 * empty/continue statement and the properties do not force its printing.
 */
static text text_block_else(entity module,
                            const char * label __attribute__ ((unused)),
                            int margin,
                            statement stmt,
                            int n __attribute__ ((unused)),
                            list pdl) {
  text r = make_text(NIL);

  if (!statement_with_empty_comment_p(stmt)
      || (!empty_statement_p(stmt) && !continue_statement_p(stmt))
      || (empty_statement_p(stmt) && (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS")))
      || (continue_statement_p(stmt) && (get_bool_property("PRETTYPRINT_ALL_LABELS")))) {
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "ELSE"));
      MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, stmt, pdl));
      break;
    case is_language_c:
      /* One-liners may be printed braceless unless all braces are requested */
      if (one_liner_p(stmt) && !prettyprint_all_c_braces_p) {
        ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else"));
        MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, stmt,
                                               false, false, pdl));
      }
      else {
        ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "else {"));
        MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, stmt, pdl));
        ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "}"));
      }
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
    }
  }
  return r;
}

/* Prettyprint an "ELSEIF"/"else if" chain: emit this test's condition and
 * true branch, then recurse on the false branch while it is itself a bare
 * test, otherwise fall back to text_block_else.
 */
static text text_block_elseif(entity module,
                              const char* label,
                              int margin,
                              test obj,
                              int n,
                              list pdl) {
  text r = make_text(NIL);
  list pc = NIL;
  statement tb = test_true(obj);
  statement fb = test_false(obj);
  bool braces_p = !one_liner_p(tb) || prettyprint_all_c_braces_p;

  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    pc = CHAIN_SWORD(pc, strdup("ELSEIF ("));
    pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
    pc = CHAIN_SWORD(pc, strdup(") THEN"));
    break;
  case is_language_c:
    pc = CHAIN_SWORD(pc, strdup("else if ("));
    pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
    pc = CHAIN_SWORD(pc, (!braces_p?")":") {"));
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  ADD_SENTENCE_TO_TEXT(r,
                       make_sentence(is_sentence_unformatted,
                                     make_unformatted(strdup(label), n, margin, pc)));
  MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, tb,
                                         braces_p, braces_p, pdl));
  if (prettyprint_language_is_c_p() && braces_p) {
    ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, strdup("}")));
  }

  /* Continue the chain while the false branch is a plain, unlabelled,
   * uncommented test; otherwise print it as a final else clause */
  if (statement_test_p(fb)
      && empty_comments_p(statement_comments(fb))
      && entity_empty_label_p(statement_label(fb))) {
    MERGE_TEXTS(r, text_block_elseif(module,
                                     label_local_name(statement_label(fb)),
                                     margin, statement_test(fb), n, pdl));
  }
  else {
    MERGE_TEXTS(r, text_block_else(module, label, margin, fb, n, pdl));
  }

  ifdebug(8) {
    fprintf(stderr, "elseif=================================\n");
    print_text(stderr, r);
    fprintf(stderr, "==============================\n");
  }
  return (r);
}

/* Dispatch the prettyprinting of a test among the specialized printers:
 * logical IF, ELSEIF chain, IO-guard, or the general block IF.
 */
static text text_test(entity module,
                      const char* label,
                      int margin,
                      test obj,
                      int n,
                      list pdl) {
  text r = text_undefined;
  statement tb = test_true(obj);
  statement fb = test_false(obj);

  /* 1st case: one statement in the true branch => Fortran logical IF or no braces in C */
  if (nop_statement_p(fb)
      && statement_call_p(tb)
      && entity_empty_label_p(statement_label(tb))
      && empty_comments_p(statement_comments(tb))
      && !continue_statement_p(tb)
      && !get_bool_property("PRETTYPRINT_BLOCK_IF_ONLY")
      && !(call_contains_alternate_returns_p(statement_call(tb))
           && get_bool_property("PRETTYPRINT_REGENERATE_ALTERNATE_RETURNS"))
      && !(prettyprint_all_c_braces_p
           && (get_prettyprint_language_tag()==is_language_c))) {
    r = text_logical_if(module, label, margin, obj, n, pdl);
  }
  /* 2nd case: one test in the false branch => "ELSEIF" Fortran block or "else if" C construct */
  else if (statement_test_p(fb)
           && empty_comments_p(statement_comments(fb))
           && entity_empty_label_p(statement_label(fb))
           && !get_bool_property("PRETTYPRINT_BLOCK_IF_ONLY")) {
    r = text_block_ifthen(module, label, margin, obj, n, pdl);
    MERGE_TEXTS(r, text_block_elseif (module,
                                      label_local_name(statement_label(fb)),
                                      margin, statement_test(fb), n, pdl));
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDIF"));
      break;
    case is_language_c:
      //nothing to do in C
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
    }
  }
  else {
    /* General case; IO-guard tests get special handling unless checked */
    syntax c = expression_syntax(test_condition(obj));
    if (syntax_reference_p(c)
        && io_entity_p(reference_variable(syntax_reference(c)))
        && !get_bool_property("PRETTYPRINT_CHECK_IO_STATEMENTS"))
      r = text_io_block_if(module, label, margin, obj, n, pdl);
    else
      r = text_block_if(module, label, margin, obj, n, pdl);
  }
ifdebug(8) {
    fprintf(stderr, "text_test=================================\n");
    print_text(stderr, r);
    fprintf(stderr, "==============================\n");
  }
  return r;
}

/* hook for adding something in the head. used by hpfc.
 * done so to avoid hpfc->prettyprint dependence in the libs.
 * FC. 29/12/95.
 */
static string (*head_hook)(entity) = NULL;
void set_prettyprinter_head_hook(string(*f)(entity)){ head_hook=f;}
void reset_prettyprinter_head_hook(){ head_hook=NULL;}

/* Dispatch the prettyprinting of an instruction on its tag: block, test,
 * loops, goto, call, unstructured, for-loop or bare expression.
 */
static text text_instruction(entity module,
                             const char* label,
                             int margin,
                             instruction obj,
                             int n,
                             list pdl) {
  text r = text_undefined;

  switch(instruction_tag(obj)) {
  case is_instruction_block: {
    r = text_block(module, label, margin, instruction_block(obj), n, pdl);
    break;
  }
  case is_instruction_test: {
    r = text_test(module, label, margin, instruction_test(obj), n, pdl);
    break;
  }
  case is_instruction_loop: {
    r = text_loop(module, label, margin, instruction_loop(obj), n, pdl);
    break;
  }
  case is_instruction_whileloop: {
    r = text_whileloop(module, label, margin, instruction_whileloop(obj), n, pdl);
    break;
  }
  case is_instruction_goto: {
    r = make_text(CONS(SENTENCE,
                       sentence_goto(module, label, margin,
                                     instruction_goto(obj), n),
                       NIL));
    break;
  }
  case is_instruction_call: {
    unformatted u;
    sentence s;
    /* FI: in C at least, this has already been decided by the caller,
       text_statement_enclosed(); but apparently not in Fortran. Also, the
       source code may be in Fortran, but the user wants it prettyprinted
       as C.
    */
    if (prettyprint_language_is_fortran_p()
        && instruction_continue_p(obj)
        && empty_string_p(label)
        && !get_bool_property("PRETTYPRINT_ALL_LABELS")) {
      pips_debug(5, "useless Fortran CONTINUE not printed\n");
      r = make_text(NIL);
    }
    else {
      switch (get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        u = make_unformatted(strdup(label), n, margin,
                             words_call(instruction_call(obj), 0, true, true, pdl));
        break;
      case is_language_c:
        /* In C a statement terminator (";") must be appended to the call */
        u = make_unformatted(strdup(label), n, margin,
                             CHAIN_SWORD(words_call(instruction_call(obj),
                                                    0, true, true, pdl),
                                         C_STATEMENT_END_STRING));
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
      }
      s = make_sentence(is_sentence_unformatted, u);
      r = make_text(CONS(SENTENCE, s, NIL));
    }
    break;
  }
  case is_instruction_unstructured: {
    // append local variables if there is some.
    // local variable need to be inserted before digging the
    // unstructured graph.
    r = insert_locals(r);
    text tmp = text_undefined;
    tmp = text_unstructured(module, label, margin,
                            instruction_unstructured(obj), n);
    // append the unstructured to the current text if it exists
    if ((r != text_undefined) && (r != NULL)) {
      MERGE_TEXTS (r, tmp);
    }
    else {
      r = tmp;
    }
    break;
  }
  case is_instruction_forloop: {
    r = text_forloop(module, label, margin, instruction_forloop(obj), n, pdl);
    break;
  }
  case is_instruction_expression: {
    /* A bare expression statement: print it followed by ";" */
    list pc = words_expression(instruction_expression(obj), pdl);
    unformatted u;
    pc = CHAIN_SWORD(pc,C_CONTINUE_FUNCTION_NAME);
    u = make_unformatted(strdup(label), n, margin, pc);
    r = make_text(CONS(SENTENCE,
                       make_sentence(is_sentence_unformatted, u),
                       NIL));
    break;
  }
  default: {
    pips_internal_error("unexpected tag");
  }
  }
  return (r);
}

/* In case the input code is not C code, non-standard comments have to be
 * detected.
 *
 * Hand-written finite-state automaton over the comment string: accepts
 * any sequence of whitespace-separated "//..." and "/*...*" "/"
 * comments (and the empty string) and rejects anything else.
 */
bool C_comment_p(string c){
  bool is_C_comment=true;
  char * ccp=c;
  char cc=' ';
 init: /* between comments: skip whitespace, expect '/' or end of string */
  cc=*ccp++;
  if(cc==' '|| cc=='\t' || cc=='\n')
    goto init;
  else if( cc=='/')
    goto slash;
  else if(cc=='\000')
    goto end;
  else {
    is_C_comment=false;
    goto end;
  }
 slash: /* a '/' has been seen: expect '*' or a second '/' */
  cc=*ccp++;
  if(cc=='*')
    goto slash_star;
  else if(cc=='/')
    goto slash_slash;
  else{
    is_C_comment=false;
    goto end;
  }
 slash_star: /* inside a block comment, looking for the closing "*" "/" */
  cc=*ccp++;
  if(cc=='*')
    goto slash_star_star;
  else if(cc=='\0'){
    /* unterminated block comment */
    is_C_comment=false;
    goto end;
  }
  else
    goto slash_star;
 slash_slash: /* inside a line comment, consume until newline or end */
  cc=*ccp++;
  if(cc=='\n')
    goto init;
  if(cc=='\0')
    // The comment may not end first with a '\n'
    goto end;
  else
    goto slash_slash;
 slash_star_star: /* a '*' seen inside a block comment: '/' would close it */
  cc=*ccp++;
  if(cc=='/')
    goto init;
  else if(cc=='*')
    goto slash_star_star;
  else if(cc=='\0'){
    is_C_comment=false;
    goto end;
  }
  else
    goto slash_star;
 end :
  return is_C_comment;
}

/* In case comments are not formatted according to C rules, e.g. when
 * prettyprinting Fortran code as C code, add // at beginning of lines.
 *
 * The comment string is split on '\n'; each line becomes one sentence,
 * indented at e_margin, and prefixed with "//" when the whole comment is
 * not already a valid C comment.
 */
text C_any_comment_to_text(int r_margin, string c) {
  string lb = c; /* line beginning */
  string le = c; /* line end */
  string cp = c; /* current position, pointer in comments */
  text ct = make_text(NIL);
  bool is_C_comment = C_comment_p(c);
  int e_margin = r_margin;

  /* We do not need spaces before a line feed */
  if(strcmp(c, "\n")==0)
    e_margin = 0;

  if(strlen(c)>0) {
    for(;*cp!='\0';cp++) {
      if(*cp=='\n') {
        if(cp!=c || true){ // Do not skip \n
          string cl = gen_strndup0(lb, le-lb);
          sentence s = sentence_undefined;
          if(is_C_comment)
            s = MAKE_ONE_WORD_SENTENCE(e_margin, cl);
          else if(strlen(cl)>0){
            list pc = CHAIN_SWORD(NIL, cl); // cl is uselessly duplicated
            pc = CONS(STRING, MAKE_SWORD("//"), pc);
            s= make_sentence(is_sentence_unformatted,
                             make_unformatted((char *) NULL, 0, e_margin, pc));
          }
          else {
            /* empty line: no margin, no prefix */
            s = MAKE_ONE_WORD_SENTENCE(0, cl);
          }
          ADD_SENTENCE_TO_TEXT(ct, s);
          free(cl);
        }
        lb = cp+1;
        le = cp+1;
      }
      else
        le++;
    }
    // Final \n has been removed in the parser presumably by Ronan
    // But this is also useful when non-standard comments are added,
    // for instance by phase "comment_prepend"
    if(lb<cp){
      /* flush the last, '\n'-less line */
      sentence s = sentence_undefined;
      string sl = gen_strndup0(lb,le-lb);
      if(is_C_comment) {
        s = MAKE_ONE_WORD_SENTENCE(e_margin,sl);
      }
      else {
        list pc = CHAIN_SWORD(NIL, sl); // sl is uselessly duplicated
        pc = CONS(STRING, MAKE_SWORD("//"), pc);
        s = make_sentence(is_sentence_unformatted,
                          make_unformatted((char *) NULL, 0, e_margin, pc));
      }
      ADD_SENTENCE_TO_TEXT(ct,s);
      free(sl);
    }
    else{
      //ADD_SENTENCE_TO_TEXT(ct,MAKE_ONE_WORD_SENTENCE(0,""));
      ;
    }
  }
  else{// Final \n has been removed by Ronan
    //ADD_SENTENCE_TO_TEXT(ct,MAKE_ONE_WORD_SENTENCE(0,""));
    ;
  }
  return ct;
}

// Ronan's improved version is bugged. It returns many lines for a
// unique \n because le is not updated before looping. Has this code
// been validated?
/* Split a well-formed C comment on '\n' and emit one sentence per line,
 * indented at margin (0 for empty lines). */
text C_standard_comment_to_text(int margin, string comment) {
  string line;
  string le = comment; /* position of a line end */
  text ct = make_text(NIL);
  do {
    /* Find the first end of line: */
    le = strchr(comment, '\n');
    if (le == NULL)
      /* No end-of-line, so use all the rest of the comment: */
      line = strdup(comment);
    else {
      /* Skip the '\n' at the end since the line concept is the notion of sentence */
      line = gen_strndup0(comment, le - comment);
      /* Analyze the next line: */
      comment = le + 1;
    }
    /* Do not indent if the line is empty */
    ADD_SENTENCE_TO_TEXT(ct,
                         MAKE_ONE_WORD_SENTENCE(line[0] == '\0' ? 0 : margin,
                                                line));
  } while (le != NULL);
  return ct;
}

/* Special handling for C comments with each line indented according to the
   context. I do not see the interest if the user code is already
   indented... RK

   OK, since the blanks outside the comments are removed by the parser.
*/
text C_comment_to_text(int margin, string comment) {
  text ct = text_undefined;
  /* Both branches currently use C_any_comment_to_text; the dedicated
   * C_standard_comment_to_text call is disabled (see the bug note above it) */
  if(C_comment_p(comment))
    //ct = C_standard_comment_to_text(margin, comment);
    ct = C_any_comment_to_text(margin, comment);
  else
    ct = C_any_comment_to_text(margin, comment);
  return ct;
}

/* Split s on the delimiter into a list of freshly allocated STRINGs,
 * preserving order. A trailing delimiter does not produce a final empty
 * word; consecutive delimiters produce empty words (blank lines). */
static list cstrsplit(const char * s, char delim) {
  list out = NIL;
  const char *b=s,*e=s;
  while(*e) {
    while(*e && *e!=delim)
      ++e;
    char * word = strndup(b,e-b);
    out=CONS(STRING,word,out);
    if(*e) {
      ++e;
      b=e;
    }
  }
  /* words were consed in reverse; restore source order */
  return gen_nreverse(out);
}

/* Return a formatted comment, that takes care of adding the relevant //
 * or C * depending on output language.
 *
 * When the property PRETTYPRINT_CHECK_COMMENTS is set, each line of
 * i_comments is checked against the output language's comment markers
 * and prefixed ("//", "! " or "C ") when it lacks one; otherwise the
 * comment is returned as a plain copy. The caller owns the returned
 * string.
 */
static string ensure_comment_consistency(const char * i_comments, language l) {
  string comments;
  /* Special handling of comments linked to declarations and to the poor
     job of the lexical analyzer as regards C comments: failure. */
  if(empty_comments_p(i_comments)) {
    comments = strdup("");
  }
  else {
    if(get_bool_property("PRETTYPRINT_CHECK_COMMENTS")) {
      /* Recognized comment openers for the output language, NULL-terminated */
      char * patterns [] = { NULL, NULL, NULL, NULL, NULL, NULL };
      /* Prefix added to lines that are not already comments (2 chars + NUL) */
      char prefix[3]= { 0,0,0 };
      if(language_c_p(l)) {
        patterns[0] = "//";
        patterns[1] = "/*";
        strcat(prefix,"//");
      }
      else if(language_fortran95_p(l) || language_fortran_p(l)) {
        patterns[0]= "C";
        patterns[1]= "!";
        patterns[2]= "*";
        patterns[3]= "c";
        patterns[4]= "#"; // a single test case in PIPS validation forces me to do this (Syntax/sharpcomment)
        if(language_fortran95_p(l))
          strcat(prefix,"! ");
        else
          strcat(prefix,"C ");//to keep consistency with old fashioned code
      }
      // be multi-line comments compliant
      list lines = cstrsplit(i_comments,'\n');
      list lcomments = NIL;
      for(list liter=lines;!ENDP(liter);POP(liter)){
        string line = STRING(CAR(liter));
        bool comment_ok =false;
        char *iter =line;
        /* skip leading whitespace before testing comment markers */
        while(*iter && isspace(*iter))
          iter++;
        if(*iter) {
          for(char **piter=&patterns[0];*piter;piter++) {
            if((comment_ok=(strncmp(iter,*piter,strlen(*piter))==0)))
              break;
          }
          if(!comment_ok)
            asprintf(&comments,"%s%s",prefix,line);
          else
            comments=strdup(line);
        }
        else /*blank line */
          comments=strdup(line);
        if(language_c_p(l) && strncmp(iter,"/*",2)==0 ){
          // multi-line comment started, assume it's ok now
          lcomments=gen_nconc(lcomments,gen_copy_string_list(liter));
          break; // so bad if we close the multi-line comment and keep commenting afterwards ...
        }
        else
          lcomments=gen_nconc(lcomments,CONS(STRING,comments,NIL));
      }
      comments=words_join(lcomments,"\n");
      gen_free_string_list(lcomments);
      gen_free_string_list(lines);
    }
    else
      return strdup(i_comments);
#if 0
    if(declaration_statement_p(stmt)) {
      /* LF interspersed within C struct or union or initialization
         declarations may damage the user comment. However, there is no
         way to know if the LF are valid because they are located between
         two statements or invalid because they are located within one
         statement. The information is lost by the lexer and the parser.
      */
      //comments = string_strip_final_linefeeds(strdup(i_comments));
      //comments = string_fuse_final_linefeeds(strdup(i_comments));
      comments = strdup(i_comments);
    }
    else {
      comments = strdup(i_comments);
    }
#endif
  }
  return comments;
}

/* Build the text of a statement

   @param module: the module containing the statement

   @param imargin: current tabulation

   @param stmt: the statement to print

   @param braces_p: the statement is within a block; this has an impact of
   the print-out of continue statements in C, ";"

   @param drop_continue_p: another condition to control the print-out of
   ";" or not;

   @param pdl: previous declaration list; list of entities that have
   already been declared and should not be redeclared; this is required
   for struct and union which may be declared independently or in a nested
   way. See C_syntax/struct03, 04, 05, etc...

   @return the text of the statement

   Notes:
   - in simple tests, the statement ";" may be mandatory or not.
   - continue may be used to preserve comments and then the ";" may be dropped
   - source fidelity would be easier if a new NOP statement that is never
     printed out were used.
*/
text text_statement_enclosed(entity module,
                             int imargin,
                             statement stmt,
                             bool braces_p,
                             bool drop_continue_p,
                             list pdl) {
  instruction i = statement_instruction(stmt);
  //synchronization sync = statement_synchronization(stmt);
  text r= make_text(NIL);
  text temp;
  string i_comments = statement_comments(stmt);
  string comments = string_undefined;
  bool braces_added = false; /* true when this call opened its own "{" */
  int nmargin = imargin;     /* margin actually used, bumped when braces are added */

  // To ease breakpoint setting
  //pips_assert("Blocks have no comments", !instruction_block_p(i)||empty_comments_p(comments));
  if(instruction_block_p(i) && !empty_comments_p(i_comments)) {
    pips_internal_error("Blocks should have no comments");
  }

  comments = ensure_comment_consistency(i_comments,get_prettyprint_language());

  /* A C block carrying extensions (e.g. pragmas) gets its extensions
   * printed first, then an opening brace */
  if(prettyprint_language_is_c_p()
     && statement_block_p(stmt)
     && !empty_extensions_p(statement_extensions(stmt))) {
    string ext = extensions_to_string(statement_extensions (stmt), true);
    if (ext != string_undefined) {
      ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, ext));
      braces_added = true;
      ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(imargin, "{"));
      nmargin += INDENTATION;
    }
  }

  /* Generate text for local declarations
   *
   * 31/07/2003 Nga Nguyen : This code is added for C, because a
   * statement can have its own declarations
   */
  list dl = statement_declarations(stmt);

  /* FI: consistency check - incompatible with unfolding.. and with the C parser... */
  ifdebug(1) {
    /* The real check is that dl and idl are equal, that is
       ENDP(gen_list_and_not(dl,idl)) && ENDP(gen_list_and_not(idl,dl)),
       except for the side effects of gen_list_and_not(), so dl and idl
       should be copied first. */
    if(statement_block_p(stmt)) {
      list idl = statement_to_direct_declarations(stmt);
      if(ENDP(dl) && !ENDP(idl)) {
        /* This may occur when declaration statements are added using
           subsequences by somebody forgetful of scope issues */
        // Do not forget: the error is detected within the prettyprinter...
        //print_statement(stmt);
        print_entities(idl);
        pips_internal_error("A block statement with no declarations"
                            " contains declarations\n");
      }
      else if(gen_length(dl)!=gen_length(idl)) {
        print_entities(dl);
        fprintf(stderr, "\n"); // FI, OK a fputc might do as well
        print_entities(idl);
        fprintf(stderr, "\n");
        pips_internal_error("A block statement with %d declarations"
                            " contains %d declarations in its statements\n",
                            gen_length(dl), gen_length(idl));
      }
      else
        gen_free_list(idl);
    }
    if(statement_block_p(stmt) && !ENDP(dl)) {
      /* See for instance Transformations/Simplify_control.sub/sequence01 */
      list sl = statement_block(stmt);
      if(ENDP(sl)) {
        pips_internal_error("A block statement with declarations"
                            " contains no declaration statements\n");
      }
    }
  }

  if (!ENDP(dl) && prettyprint_language_is_c_p()) {
    if(statement_block_p(stmt)) {
      /* A declaration-carrying block needs braces even when the caller
       * did not provide them */
      if(!braces_p && !braces_added) {
        braces_added = true;
        ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(imargin, "{"));
        nmargin += INDENTATION;
      }
    }
    else {
      pips_assert("declarations are carried by continue statements",
                  continue_statement_p(stmt));
    }
    // initialize the local variable text if needed
    if (local_flg == false) {
      local_flg = true;
      local_var = make_text(NIL);
    }
    if(declaration_statement_p(stmt)) {
      int sn = statement_number(stmt);
      MERGE_TEXTS(local_var, c_text_related_entities(module,dl,nmargin,sn,dl));
    }
    else {
      //MERGE_TEXTS(local_var, c_text_entities(module,l,nmargin));
      // Do nothing and rely on CONTINUE statements...
      ;
    }
  }

  pips_debug(2, "Begin for statement %s with braces_p=%d\n",
             statement_identification(stmt),braces_p);
  pips_debug(9, "statement_comments: --%s--\n",
             string_undefined_p(comments)? "<undef>": comments);

  if(statement_number(stmt)!=STATEMENT_NUMBER_UNDEFINED
     && statement_ordering(stmt)==STATEMENT_ORDERING_UNDEFINED) {
    /* we are in trouble with some kind of dead (?) code... but we might
       as well be dealing with some parsed_code */
    pips_debug(1, "I unexpectedly bumped into dead code?\n");
  }

  const char* label;
  bool pragma_before_label_in_C = prettyprint_language_is_c_p()
    && statement_with_pragma_p(stmt)
    && !unlabelled_statement_p(stmt);
  if (pragma_before_label_in_C)
    /* We are in trouble because a pragma in C should appear after the
       label but the Fortran-oriented prettyprinter is to prettyprint a
       label and an instruction in block. So we print the instruction
       without the label that is to be added in another way afterwards */
    label = "";
  else
    label = label_local_name(statement_label(stmt));

  if (entity_return_label_p(statement_label(stmt))) {
    pips_assert("Statement with return label must be a return statement",
                return_statement_p(stmt));
    /* do not add a redundant RETURN before an END, unless requested or
       unless needed because a value must be returned in C */
    bool last_statement_p(statement);
    if(get_bool_property("PRETTYPRINT_FINAL_RETURN")
       || !last_statement_p(stmt)
       || (!void_function_p(module) && c_module_p(module))) {
      /* NOTE(review): the comment below is a fossilized SVN merge
         conflict marker kept as documentation of the replaced code */
      /*<<<<<<< .working sentence s = MAKE_ONE_WORD_SENTENCE(nmargin, prettyprint_language_is_c_p()?C_RETURN_FUNCTION_NAME";":RETURN_FUNCTION_NAME); =======*/
      sentence s = sentence_undefined;
      if(entity_undefined_p(module)
         || void_function_p(module)
         || fortran_module_p(module)) {
        s = MAKE_ONE_WORD_SENTENCE(nmargin,
                                   prettyprint_language_is_c_p()?
                                   C_RETURN_FUNCTION_NAME";"
                                   :RETURN_FUNCTION_NAME);
      }
      else {
        // Must be a non void C function
        entity rv = function_to_return_value(module);
        list pc = NIL;
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME);
        pc = CHAIN_SWORD(pc, " ");
        pc = CHAIN_SWORD(pc, entity_user_name(rv));
        pc = CHAIN_SWORD(pc, C_CONTINUE_FUNCTION_NAME);
        unformatted u = make_unformatted((char *) NULL, 0, nmargin, pc);
        s = make_sentence_unformatted(u);
      }
      //>>>>>>> .merge-right.r18859
      temp = make_text(CONS(SENTENCE, s, NIL));
    }
    else {
      temp = make_text(NIL);
    }
  }
  else {
    entity m = entity_undefined_p(module)?
      get_current_module_entity() : module;
    /* NOTE(review): "true ||" disables the compilation-unit filter below */
    if(true || !compilation_unit_p(entity_name(m))) {
      /* Do we need to print this CONTINUE statement in C? */
      string cs = statement_comments(stmt);
      if (prettyprint_language_is_c_p()
          && (braces_p || drop_continue_p)
          && unlabelled_statement_p(stmt)
          && instruction_continue_p(i)) {
        if(!ENDP(statement_declarations(stmt))) {
          /* The declarations will be printed, no need for anything else */
          temp = make_text(NIL);
        }
        else if(string_undefined_p(cs) || cs == NULL || strcmp(cs, "")==0) {
          sentence s = MAKE_ONE_WORD_SENTENCE(0, "");
          temp = make_text(CONS(SENTENCE, s, NIL));
          //temp = make_text(NIL);
        }
        else if(strcmp(cs, "\n")==0) {
          // MAKE_ONE_WORD_SENTENCE already implies a '\n'
          sentence s = MAKE_ONE_WORD_SENTENCE(0, "");
          temp = make_text(CONS(SENTENCE, s, NIL));
        }
        else
          temp = text_instruction(module, label, nmargin, i,
                                  statement_number(stmt), pdl);
      }
      else
        temp = text_instruction(module, label, nmargin, i,
                                statement_number(stmt), pdl);
    }
    else
      temp = make_text(NIL);
  }

  /* Take care of comments and of analysis results printed as comments
   *
   * Note about comments: they are duplicated here, but I'm pretty
   * sure that the free is NEVER performed as it should. FC.
   */
  if(!ENDP(text_sentences(temp))) {
    /* There is something to output for the instruction... */
    MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
    if (! empty_comments_p(comments)) {
      text ct = text_undefined;
      switch(get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
                                              strdup(comments)));
        break;
      case is_language_c:
        ct = C_comment_to_text(nmargin, comments);
        MERGE_TEXTS(r, ct);
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
      }
    }
  }
  else {
    /* There is nothing to output for the instruction itself. Preserve
       comments and empty C instruction */
    if (! empty_comments_p(comments)) {
      text ct = text_undefined;
      switch (get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
                                              strdup(comments)));
        break;
      case is_language_c:
        ct = C_comment_to_text(nmargin, comments);
        MERGE_TEXTS(r, ct);
        MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
      }
    }
    else if(prettyprint_language_is_c_p()
            && !braces_p && !braces_added &&ENDP(dl)) {
      // Because C braces can be eliminated and hence semi-colon
      // may be mandatory in a test branch or in a loop body.
      // A. Mensi
      sentence s = MAKE_ONE_WORD_SENTENCE(nmargin,
                                          strdup(C_CONTINUE_FUNCTION_NAME));
      ADD_SENTENCE_TO_TEXT(r, s);
    }
    else if(!ENDP(dl)) {
      MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
    }
  }

  /* Add the label if not already done, in the case we want it before a
     extension/pragma: */
  if (pragma_before_label_in_C)
    ADD_SENTENCE_TO_TEXT(r,
                         make_sentence(is_sentence_unformatted,
                                       make_unformatted(strdup(label_local_name(statement_label(stmt))),
                                                        STATEMENT_NUMBER_UNDEFINED,
                                                        0, NULL)));

  if(!(prettyprint_language_is_c_p() && statement_block_p(stmt))) {
    /* Append the extensions after comments: */
    string ext = extensions_to_string(statement_extensions (stmt), true);
    if (ext != string_undefined) {
      ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, ext));
    }
  }

  /* Then add any instruction text: */
  MERGE_TEXTS(r, temp);

  /* append local variables that might have not been inserted
     previously

     FI: this seems to be quite late and might explain the problem with
     local variables of Fortran do loops. Might, because I've never
     managed to figure out exactly what happens...
  */
  r = insert_locals (r);

  if (braces_added) {
    ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(imargin, "}"));
  }
  attach_statement_information_to_text(r, stmt);

  // the last thing to do is to close the extension
  string close = close_extensions (statement_extensions (stmt), true);
  if (close != string_undefined) {
    ADD_SENTENCE_TO_TEXT(r,make_sentence(is_sentence_formatted, close));
  }

  ifdebug(1) {
    if (instruction_sequence_p(i)) {
      if(!(statement_with_empty_comment_p(stmt)
           && statement_number(stmt) == STATEMENT_NUMBER_UNDEFINED
           && unlabelled_statement_p(stmt))) {
        user_log("Block statement %s\n"
                 "Block number=%d, Block label=\"%s\", block comment=\"%s\"\n",
                 statement_identification(stmt),
                 statement_number(stmt),
                 label_local_name(statement_label(stmt)),
                 statement_comments(stmt));
        pips_internal_error("This block statement should be labelless,"
                            " numberless and commentless.\n");
      }
    }
  }
  ifdebug(8){
    fprintf(stderr,"text_statement_enclosed=================================\n");
    print_text(stderr,r);
    fprintf(stderr,"==============================\n");
  }

  free(comments);
  pips_debug(2, "End for statement %s\n", statement_identification(stmt));
  return(r);
}

/* Handles all statements but tests that are nodes of an unstructured.
   Those are handled by text_control.

   @param module: the module containing the statement

   @param margin: current tabulation

   @param stmt: the statement to print

   @param pdl: previous declaration list; list of entities that have
   already been declared and should not be redeclared; this is required
   for struct and union which may be declared independently or in a nested
   way. See C_syntax/struct03, 04, 05, etc...

   @return the text of the statement
*/
text text_statement( entity module, int margin, statement stmt, list pdl) {
  return text_statement_enclosed(module, margin, stmt, true, true, pdl);
}

/* Keep track of the last statement to decide if a final return can be
 * omitted or not.
If no last statement can be found for sure, for * instance because it depends on the prettyprinter, last_statement_found is * set to statement_undefined which is safe. * * FI: for purposes unrelated to prettyprint, see * last_statement(). This function is part of the prettyprinter and * probably only useful for Fortran code. */ static statement last_statement_found = statement_undefined; statement find_last_statement(statement s) { statement last = statement_undefined; pips_assert("statement is defined", !statement_undefined_p(s)); if(statement_sequence_p(s)) { list ls = instruction_block(statement_instruction(s)); last = (ENDP(ls)? statement_undefined : STATEMENT(CAR(gen_last(ls)))); } else if(statement_unstructured_p(s)) { unstructured u = statement_unstructured(s); list trail = unstructured_to_trail(u); last = control_statement(CONTROL(CAR(trail))); gen_free_list(trail); } else if(statement_call_p(s)) { /* Hopefully it is a return statement. * Since the semantics of STOP is ignored by the parser, a * final STOp should be followed by a RETURN. */ last = s; } else { /* loop or test cannot be last statements of a module */ last = statement_undefined; } /* recursive call */ if(!statement_undefined_p(last) && (statement_sequence_p(last) || statement_unstructured_p(last))) { last = find_last_statement(last); } /* Too many program transformations and syntheses violate the following assert */ if(!(statement_undefined_p(last) || !statement_sequence_p(s) || return_statement_p(last))) { switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pips_user_warning("Last statement is not a RETURN!\n"); break; case is_language_c: /* No warning needed for C, is it right for C ?*/ break; default: pips_internal_error("Language unknown !"); break; } last = statement_undefined; } /* I had a lot of trouble writing the condition for this assert... 
*/ pips_assert("Last statement is either undefined or a call to return", statement_undefined_p(last) /* let's give up: it's always safe */ || !statement_sequence_p(s) /* not a block: any kind of statement... */ || return_statement_p(last)); /* if a block, then a return */ return last; } void set_last_statement(statement s) { statement ls = statement_undefined; pips_assert("last statement is undefined", statement_undefined_p(last_statement_found)); ls = find_last_statement(s); last_statement_found = ls; } void reset_last_statement() { last_statement_found = statement_undefined; } bool last_statement_p(statement s) { pips_assert("statement is defined\n", !statement_undefined_p(s)); return s == last_statement_found; } /* Build the text of a module. The original text of the declarations is used if possible in Fortran. Otherwise, the function text_declaration is called. */ text text_named_module( entity name, /**< the name of the module */ entity module, statement stat) { text r = make_text(NIL); code c = entity_code(module); string s = code_decls_text(c); text ral = text_undefined; debug_on("PRETTYPRINT_DEBUG_LEVEL"); /* Set the prettyprint language */ set_prettyprint_language_from_property(language_tag(code_language(c))); /* This guard is correct but could be removed if find_last_statement() * were robust and/or if the internal representations were always "correct". 
* See also the guard for reset_last_statement() */ if(!get_bool_property("PRETTYPRINT_FINAL_RETURN")) set_last_statement(stat); precedence_p = !get_bool_property("PRETTYPRINT_ALL_PARENTHESES"); prettyprint_all_c_braces_p = get_bool_property("PRETTYPRINT_ALL_C_BRACES"); prettyprint_gcc_c_braces_p = get_bool_property("PRETTYPRINT_GCC_C_BRACES"); list l = NIL; switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: if(strcmp(s, "") == 0 || get_bool_property("PRETTYPRINT_ALL_DECLARATIONS")) { if(get_bool_property("PRETTYPRINT_HEADER_COMMENTS")) /* Add the original header comments if any: */ ADD_SENTENCE_TO_TEXT(r, get_header_comments(module)); ADD_SENTENCE_TO_TEXT(r, attach_head_to_sentence(sentence_head(name, NIL), module)); if(head_hook) ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, head_hook(module))); if(get_bool_property("PRETTYPRINT_HEADER_COMMENTS")) /* Add the original header comments if any: */ ADD_SENTENCE_TO_TEXT(r, get_declaration_comments(module)); MERGE_TEXTS(r, text_declaration(module)); MERGE_TEXTS(r, text_initializations(module)); } else { ADD_SENTENCE_TO_TEXT(r, attach_head_to_sentence(make_sentence(is_sentence_formatted, strdup(s)), module)); } break; case is_language_c: /* C prettyprinter */ pips_debug(3,"Prettyprint function %s\n",entity_name(name)); if(!compilation_unit_p(entity_name(name))) { //entity cu = module_entity_to_compilation_unit_entity(module); //list pdl = code_declarations(value_code(entity_initial(cu)))); /* Print function header if the current module is not a compilation unit*/ ADD_SENTENCE_TO_TEXT(r,attach_head_to_sentence(sentence_head(name, NIL), module)); ADD_SENTENCE_TO_TEXT(r,MAKE_ONE_WORD_SENTENCE(0,"{")); /* get the declarations for Fortran codes prettyrinted as C, as the declarations are not located in the module statement. 
A.Mensi */ if(ENDP(statement_declarations(stat)) && fortran_module_p(module)) { l = code_declarations(value_code(entity_initial(module))); MERGE_TEXTS(r,c_text_entities(module, l, INDENTATION, NIL)); } } break; default: pips_internal_error("Language unknown !"); break; } set_alternate_return_set(); reset_label_counter(); if (stat != statement_undefined) { /* FI: This function should not be used here because it is part of the preprocessor library... */ //entity cu = module_entity_to_compilation_unit_entity(module); switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: MERGE_TEXTS(r, text_statement(module, get_prettyprint_indentation(), stat, NIL)); break; case is_language_c: MERGE_TEXTS(r, text_statement(module, (compilation_unit_p(entity_name(name)))?0:INDENTATION, stat, NIL)); break; default: pips_internal_error("Language unknown !"); break; } } ral = generate_alternate_return_targets(); reset_alternate_return_set(); MERGE_TEXTS(r, ral); if(!compilation_unit_p(entity_name(name)) || prettyprint_language_is_fortran_p()) { /* No need to print TAIL (}) if the current module is a C compilation unit*/ ADD_SENTENCE_TO_TEXT(r, sentence_tail(module)); } if(!get_bool_property("PRETTYPRINT_FINAL_RETURN")) reset_last_statement(); debug_off(); return(r); } text text_module(entity module, statement stat) { return text_named_module(module, module, stat); } text text_graph(), text_control() ; string control_slabel() ; /* The node itentifiers are generated from the ordering, more stable than the control node address: */ void add_control_node_identifier_to_text(text r, control c) { _int so = statement_ordering(control_statement(c)); add_one_unformated_printf_to_text(r, "c_%d_%d", ORDERING_NUMBER(so), ORDERING_STATEMENT(so)); } void output_a_graph_view_of_the_unstructured_successors(text r, entity module, int margin, control c) { list pdl = NIL; // FI: I have no idea how to initialize it in this context... 
add_one_unformated_printf_to_text(r, "%s ", PRETTYPRINT_UNSTRUCTURED_ITEM_MARKER); add_control_node_identifier_to_text(r, c); add_one_unformated_printf_to_text(r, "\n"); if (get_bool_property("PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH_VERBOSE")) { add_one_unformated_printf_to_text(r, "C Unstructured node %p ->", c); MAP(CONTROL, a_successor, add_one_unformated_printf_to_text(r, " %p", a_successor), control_successors(c)); add_one_unformated_printf_to_text(r,"\n"); } MERGE_TEXTS(r, text_statement(module, margin, control_statement(c), pdl)); add_one_unformated_printf_to_text(r, PRETTYPRINT_UNSTRUCTURED_SUCC_MARKER); MAP(CONTROL, a_successor, { add_one_unformated_printf_to_text(r, " "); add_control_node_identifier_to_text(r, a_successor); }, control_successors(c)); add_one_unformated_printf_to_text(r,"\n"); } bool output_a_graph_view_of_the_unstructured_from_a_control(text r, entity module, int margin, control begin_control, control exit_control) { bool exit_node_has_been_displayed = false; list blocs = NIL; CONTROL_MAP(c, { /* Display the statements of each node followed by the list of its successors if any: */ output_a_graph_view_of_the_unstructured_successors(r, module, margin, c); if (c == exit_control) exit_node_has_been_displayed = true; }, begin_control, blocs); gen_free_list(blocs); return exit_node_has_been_displayed; } void output_a_graph_view_of_the_unstructured(text r, entity module, const char * label __attribute__ ((unused)), int margin, unstructured u, int __attribute__ ((unused)) num) { bool exit_node_has_been_displayed = false; control begin_control = unstructured_control(u); control end_control = unstructured_exit(u); add_one_unformated_printf_to_text(r, "%s ", PRETTYPRINT_UNSTRUCTURED_BEGIN_MARKER); add_control_node_identifier_to_text(r, begin_control); add_one_unformated_printf_to_text(r, " end: "); add_control_node_identifier_to_text(r, end_control); add_one_unformated_printf_to_text(r, "\n"); exit_node_has_been_displayed = 
output_a_graph_view_of_the_unstructured_from_a_control(r, module, margin, begin_control, end_control); /* If we have not displayed the exit node, that mean that it is not connex with the entry node and so the code is unreachable. Anyway, it has to be displayed as for the classical Sequential View: */ if (! exit_node_has_been_displayed) { /* Note that since the controlizer adds a dummy successor to the exit node, use output_a_graph_view_of_the_unstructured_from_a_control() instead of output_a_graph_view_of_the_unstructured_successors(): */ output_a_graph_view_of_the_unstructured_from_a_control(r, module, margin, end_control, end_control); /* Even if the code is unreachable, add the fact that the control above is semantically related to the entry node. Add a dash arrow from the entry node to the exit node in daVinci, for example: */ add_one_unformated_printf_to_text(r, "%s ", PRETTYPRINT_UNREACHABLE_EXIT_MARKER); add_control_node_identifier_to_text(r, begin_control); add_one_unformated_printf_to_text(r, " -> "); add_control_node_identifier_to_text(r, end_control); add_one_unformated_printf_to_text(r, "\n"); if (get_bool_property("PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH_VERBOSE")) add_one_unformated_printf_to_text(r, "C Unreachable exit node (%p -> %p)\n", begin_control, end_control); } add_one_unformated_printf_to_text(r, "%s ", PRETTYPRINT_UNSTRUCTURED_END_MARKER); add_control_node_identifier_to_text(r, begin_control); add_one_unformated_printf_to_text(r, " end: "); add_control_node_identifier_to_text(r, end_control); add_one_unformated_printf_to_text(r, "\n"); } /* ================C prettyprinter functions================= */ static list words_cast(cast obj, int precedence, list pdl) { list pc = NIL; type t = cast_type(obj); expression exp = cast_expression(obj); bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES"); pc = CHAIN_SWORD(pc,"("); pc = gen_nconc(pc, c_words_entity(t, NIL, pdl)); pc = CHAIN_SWORD(pc, space_p? 
") " : ")"); pc = gen_nconc(pc, words_subexpression(exp, CAST_OPERATOR_PRECEDENCE, true, pdl)); if(get_bool_property("PRETTYPRINT_ALL_PARENTHESES") || precedence >= 25) { pc = CONS(STRING, strdup("("), gen_nconc(pc,CONS(STRING, strdup(")"), NIL))); } return pc; } static list words_sizeofexpression(sizeofexpression obj, bool in_type_declaration, list pdl) { list pc = NIL; switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pips_user_warning("generating FORTRAN 2008 function call defined in the the module ISO_C_BINDING\n"); pc = CHAIN_SWORD(pc,"c_sizeof("); break; case is_language_c: pc = CHAIN_SWORD(pc,"sizeof("); break; default: pips_internal_error("Language unknown !"); break; } if (sizeofexpression_type_p(obj)) { type t = sizeofexpression_type(obj); /* FI: the test used below is probably too strict I believe, because dimensions are not allowed, but I may be wrong*/ if(derived_type_p(t)) { entity te = basic_derived(variable_basic(type_variable(t))); if(!gen_in_list_p((void *) te, pdl)) { list pca = words_type(sizeofexpression_type(obj), pdl, false); pc = gen_nconc(pc, pca); } else { /* The type must be fully declared: see struct15.c */ list pct = c_words_simplified_entity(t, NIL, true, in_type_declaration, pdl); pc = gen_nconc(pc, pct); } } else { list pca = words_type(sizeofexpression_type(obj), pdl, false); pc = gen_nconc(pc, pca); } } else pc = gen_nconc(pc, words_expression(sizeofexpression_expression(obj), pdl)); pc = CHAIN_SWORD(pc,")"); return pc; } static list words_subscript(subscript s, list pdl) { list pc = NIL; expression a = subscript_array(s); list lexp = subscript_indices(s); bool first = true; /* Parentheses must be added for array expression * like __ctype+1 in (__ctype+1)[*np] */ /* Here we differentiate the indices parenthesis syntax */ switch(get_prettyprint_language_tag()) { case is_language_fortran: pips_internal_error("We don't know how to prettyprint a subscript in " "Fortran, aborting"); case 
is_language_fortran95: { bool allocatable_p = expression_allocatable_data_access_p(a); pips_assert("We don't know how to prettyprint a subscript in Fortran95 " "and it's not an allocatable", allocatable_p ); pc = gen_nconc(pc, words_expression(a, pdl)); if(!ENDP(lexp)) { pc = CHAIN_SWORD(pc,"("); } break; } case is_language_c: pc = CHAIN_SWORD(pc,"("); pc = gen_nconc(pc, words_expression(a, pdl)); pc = CHAIN_SWORD(pc,")"); if(!ENDP(lexp)) { pc = CHAIN_SWORD(pc,"["); } break; default: pips_internal_error("Language unknown !"); break; } /* Print now the indices list */ FOREACH(expression,exp,lexp) { if(!first) { switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: pc = CHAIN_SWORD(pc, ","); break; case is_language_c: pc = CHAIN_SWORD(pc,"]["); break; default: pips_internal_error("Language unknown !"); break; } } pc = gen_nconc(pc, words_expression(exp, pdl)); first = false; } /* Here we differentiate the indices syntax */ switch(get_prettyprint_language_tag()) { case is_language_fortran: case is_language_fortran95: if(!ENDP(lexp)) { pc = CHAIN_SWORD(pc,")"); } break; case is_language_c: if(!ENDP(lexp)) { pc = CHAIN_SWORD(pc,"]"); } break; default: pips_internal_error("Language unknown !"); break; } return pc; } static list words_application(application a, list pdl) { list pc = NIL; expression f = application_function(a); list lexp = application_arguments(a); bool first = true; /* Parentheses must be added for function expression */ pc = CHAIN_SWORD(pc,"("); pc = gen_nconc(pc, words_expression(f, pdl)); pc = CHAIN_SWORD(pc,")("); MAP(EXPRESSION,exp, { if (!first) pc = CHAIN_SWORD(pc,","); pc = gen_nconc(pc, words_expression(exp, pdl)); first = false; },lexp); pc = CHAIN_SWORD(pc,")"); return pc; } static text text_forloop(entity module, const char* label, int margin, forloop obj, int n, list pdl) { list pc = NIL; unformatted u; text r = make_text(NIL); statement body = forloop_body(obj) ; //instruction i = 
statement_instruction(body); bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p; pc = CHAIN_SWORD(pc,"for ("); if (!expression_undefined_p(forloop_initialization(obj))) pc = gen_nconc(pc, words_expression(forloop_initialization(obj), pdl)); pc = CHAIN_SWORD(pc,C_STATEMENT_END_STRING); if (!expression_undefined_p(forloop_condition(obj))) { /* To restitute for(;;) */ expression cond = forloop_condition(obj); if(!expression_one_p(cond)) pc = gen_nconc(pc, words_expression(forloop_condition(obj), pdl)); } pc = CHAIN_SWORD(pc,C_STATEMENT_END_STRING); if (!expression_undefined_p(forloop_increment(obj))) pc = gen_nconc(pc, words_expression(forloop_increment(obj), pdl)); pc = CHAIN_SWORD(pc,!braces_p?")":") {"); u = make_unformatted(strdup(label), n, margin, pc) ; ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u)); if(!braces_p) { MERGE_TEXTS(r, text_statement_enclosed(module, margin+INDENTATION, body, !one_liner_p(body), !one_liner_p(body), pdl)); } else { // ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"{")); MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl)); ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}")); } return r; }
GB_reduce_each_index.c
//------------------------------------------------------------------------------
// GB_reduce_each_index: T(i)=reduce(A(i,:)), reduce a matrix to a vector
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Reduce a matrix to a vector. All entries in A(i,:) are reduced to T(i).
// First, all threads reduce their slice to their own workspace, operating on
// roughly the same number of entries each. The vectors in A are ignored; the
// reduction only depends on the indices. Next, the threads cooperate to
// reduce all workspaces to the workspace of thread 0. Finally, this last
// workspace is collected into T.

// NOTE(review): this is a template fragment, not a standalone function.  The
// #including file is expected to supply A, ttype, T, info, Context, the slice
// boundaries pstart_slice[], the thread counts nth (number of slices /
// per-slice workspaces) and nthreads (thread count for the index-space loops
// below), and the GB_* type/memory/monoid macros — confirm against the
// including Generated/ source.

{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *restrict Ax = A->x ;
    const int64_t *restrict Ai = A->i ;
    const int64_t n = A->vlen ;
    size_t zsize = ttype->size ;

    //--------------------------------------------------------------------------
    // allocate workspace for each thread
    //--------------------------------------------------------------------------

    // Works [tid][0..n-1]: partial reduction values for thread tid.
    // Marks [tid][i]: true if thread tid has seen index i at least once
    // (calloc'd, so initially all false).

    GB_CTYPE *Works [nth] ;
    bool *Marks [nth] ;
    bool ok = true ;

    // This does not need to be parallel. The calloc does not take O(n) time.
    for (int tid = 0 ; tid < nth ; tid++)
    {
        GB_MALLOC_MEMORY (Works [tid], n, zsize) ;
        GB_CALLOC_MEMORY (Marks [tid], n, sizeof (bool)) ;
        ok = ok && (Works [tid] != NULL && Marks [tid] != NULL) ;
    }

    if (!ok)
    {
        // out of memory: free whatever was allocated and bail out
        for (int tid = 0 ; tid < nth ; tid++)
        {
            GB_FREE_MEMORY (Works [tid], n, zsize) ;
            GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
        }
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // reduce each slice in its own workspace
    //--------------------------------------------------------------------------

    // Tnz [tid]: number of distinct indices seen by thread tid

    int64_t Tnz [nth] ;

    // each thread reduces its own slice in parallel
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (int tid = 0 ; tid < nth ; tid++)
    {
        //----------------------------------------------------------------------
        // get the workspace for this thread
        //----------------------------------------------------------------------

        GB_CTYPE *restrict Work = Works [tid] ;
        bool *restrict Mark = Marks [tid] ;
        int64_t my_tnz = 0 ;

        //----------------------------------------------------------------------
        // reduce the entries
        //----------------------------------------------------------------------

        // Each entry A(i,:) in this thread's slice of entries contributes to
        // Work [i]; only the row index Ai [p] matters, not the vector.
        for (int64_t p = pstart_slice [tid] ; p < pstart_slice [tid+1] ; p++)
        {
            int64_t i = Ai [p] ;
            // ztype aij = (ztype) Ax [p], with typecast
            GB_SCALAR (aij) ;
            GB_CAST_ARRAY_TO_SCALAR (aij, Ax, p) ;
            if (!Mark [i])
            {
                // first time index i has been seen
                // Work [i] = aij ; no typecast
                GB_COPY_SCALAR_TO_ARRAY (Work, i, aij) ;
                Mark [i] = true ;
                my_tnz++ ;
            }
            else
            {
                // Work [i] += aij ; no typecast
                GB_ADD_SCALAR_TO_ARRAY (Work, i, aij) ;
            }
        }
        Tnz [tid] = my_tnz ;
    }

    //--------------------------------------------------------------------------
    // reduce all workspace to Work [0] and count # entries in T
    //--------------------------------------------------------------------------

    GB_CTYPE *restrict Work0 = Works [0] ;
    bool *restrict Mark0 = Marks [0] ;
    int64_t tnz = Tnz [0] ;

    if (nth > 1)
    {
        // Parallel over the index space: each i is owned by exactly one
        // iteration, so accumulating Works [1..nth-1] into Work0 is race-free.
        // NOTE(review): this loop intentionally uses nthreads, not nth — the
        // workspaces were built by nth threads but may be merged by a
        // different number; confirm both are set by the including file.
        #pragma omp parallel for num_threads(nthreads) schedule(static) \
            reduction(+:tnz)
        for (int64_t i = 0 ; i < n ; i++)
        {
            for (int tid = 1 ; tid < nth ; tid++)
            {
                const bool *restrict Mark = Marks [tid] ;
                if (Mark [i])
                {
                    // thread tid has a contribution to index i
                    const GB_CTYPE *restrict Work = Works [tid] ;
                    if (!Mark0 [i])
                    {
                        // first time index i has been seen
                        // Work0 [i] = Work [i] ; no typecast
                        GB_COPY_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                        Mark0 [i] = true ;
                        tnz++ ;
                    }
                    else
                    {
                        // Work0 [i] += Work [i] ; no typecast
                        GB_ADD_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                    }
                }
            }
        }

        // free all but workspace for thread 0
        for (int tid = 1 ; tid < nth ; tid++)
        {
            GB_FREE_MEMORY (Works [tid], n, zsize) ;
            GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
        }
    }

    //--------------------------------------------------------------------------
    // allocate T
    //--------------------------------------------------------------------------

    // since T is a GrB_Vector, it is CSC and not hypersparse
    GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
        GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, tnz, true, Context) ;
    // NOTE(review): info is set as a side effect of GB_CREATE — confirm
    // against the macro definition in the including file.
    if (info != GrB_SUCCESS)
    {
        // out of memory (only workspace 0 remains; the rest was freed above)
        GB_FREE_MEMORY (Works [0], n, zsize) ;
        GB_FREE_MEMORY (Marks [0], n, sizeof (bool)) ;
        return (GB_OUT_OF_MEMORY) ;
    }
    T->p [0] = 0 ;
    T->p [1] = tnz ;
    int64_t *restrict Ti = T->i ;
    GB_CTYPE *restrict Tx = T->x ;
    T->nvec_nonempty = (tnz > 0) ? 1 : 0 ;

    //--------------------------------------------------------------------------
    // gather the results into T
    //--------------------------------------------------------------------------

    if (tnz == n)
    {
        //----------------------------------------------------------------------
        // T is dense: transplant Work0 into T->x
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t i = 0 ; i < n ; i++)
        {
            Ti [i] = i ;
        }
        // discard the array GB_CREATE allocated and take ownership of Work0
        GB_FREE_MEMORY (T->x, n, zsize) ;
        T->x = Work0 ;
        Work0 = NULL ;
    }
    else
    {
        //----------------------------------------------------------------------
        // T is sparse: gather from Work0 and Mark0
        //----------------------------------------------------------------------

        if (nthreads == 1)
        {
            //------------------------------------------------------------------
            // gather sparse T using a single thread
            //------------------------------------------------------------------

            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    Ti [p] = i ;
                    // Tx [p] = Work0 [i], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;
        }
        else
        {
            //------------------------------------------------------------------
            // gather sparse T using multiple threads
            //------------------------------------------------------------------

            // Some tasks may be completely empty and thus take no time at all;
            // 256 tasks per thread are created for better load balancing.

            // pass 1: count the marked entries in each task's index range
            int ntasks = 256 * nthreads ;
            ntasks = GB_IMIN (ntasks, n) ;
            int64_t Count [ntasks+1] ;
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (int taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = 0 ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                for (int64_t i = ifirst ; i < ilast ; i++)
                {
                    p += Mark0 [i] ;
                }
                Count [taskid] = p ;
            }

            // cumulative sum turns per-task counts into output offsets
            GB_cumsum (Count, ntasks, NULL, 1) ;

            // pass 2: each task writes its entries at its own offset
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (int64_t taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = Count [taskid] ;
                int64_t my_count = (Count [taskid+1] - p) ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                if (my_count > 0)
                {
                    for (int64_t i = ifirst ; i < ilast ; i++)
                    {
                        if (Mark0 [i])
                        {
                            Ti [p] = i ;
                            // Tx [p] = Work0 [i], no typecast
                            GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                            p++ ;
                        }
                    }
                }
            }

            #ifdef GB_DEBUG
            // check result using a single thread
            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    ASSERT (Ti [p] == i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;
            #endif
        }
    }

    //--------------------------------------------------------------------------
    // free workspace for thread 0
    //--------------------------------------------------------------------------

    // NOTE(review): in the dense case Work0 is NULL here (transplanted into
    // T->x); this relies on GB_FREE_MEMORY being a no-op on NULL — confirm
    // against the macro definition.
    GB_FREE_MEMORY (Work0, n, zsize) ;
    GB_FREE_MEMORY (Mark0, n, sizeof (bool)) ;
}
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef FLANN_NNINDEX_H #define FLANN_NNINDEX_H #include <vector> #include "flann/general.h" #include "flann/util/matrix.h" #include "flann/util/params.h" #include "flann/util/result_set.h" #include "flann/util/dynamic_bitset.h" #include "flann/util/saving.h" namespace flann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; typedef Eigen::Matrix<ElementType, Eigen::Dynamic, Eigen::Dynamic> MatrixType; typedef Eigen::Matrix<ElementType, Eigen::Dynamic, 1> VectorType; typedef Eigen::Matrix<DistanceType, Eigen::Dynamic, Eigen::Dynamic> DistanceMatrix; typedef Eigen::Matrix<DistanceType, Eigen::Dynamic, 1> DistanceVector; typedef Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> IndexMatrix; typedef Eigen::Matrix<size_t, Eigen::Dynamic, 1> IndexVector; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), 
removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * Builds the index */ virtual void buildIndex() { freeIndex(); cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds the index using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param index Index of point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. 
* * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.h.data_type = flann_datatype_value<ElementType>::value; header.h.index_type = getType(); header.h.rows = size_; header.h.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strncmp(header.h.signature, FLANN_SIGNATURE_, strlen(FLANN_SIGNATURE_)) != 0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.h.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different than of the one to be created."); } if (header.h.index_type != getType()) { throw FLANNException("Saved index type is different then the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search 
parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform k-nearest neighbor 
search
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The indices of the nearest neighbors found
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 */
// Vector-of-vectors variant: each output row is resized to the number of
// neighbors actually found, so fewer than knn results are represented exactly.
int knnSearch(const Matrix<ElementType>& queries,
              std::vector< std::vector<size_t> >& indices,
              std::vector<std::vector<DistanceType> >& dists,
              size_t knn,
              const SearchParams& params) const
{
    assert(queries.cols == veclen());

    bool use_heap;
    if (params.use_heap==FLANN_Undefined) {
        use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
    }
    else {
        use_heap = (params.use_heap==FLANN_True)?true:false;
    }

    // Grow (never shrink) the outer containers to one row per query.
    if (indices.size() < queries.rows ) indices.resize(queries.rows);
    if (dists.size() < queries.rows ) dists.resize(queries.rows);

    int count = 0;
    if (use_heap) {
#pragma omp parallel num_threads(params.cores)
        {
            KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                size_t n = std::min(resultSet.size(), knn);
                indices[i].resize(n);
                dists[i].resize(n);
                if (n>0) {
                    resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                    indices_to_ids(&indices[i][0], &indices[i][0], n);
                }
                count += n;
            }
        }
    }
    else {
#pragma omp parallel num_threads(params.cores)
        {
            KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                size_t n = std::min(resultSet.size(), knn);
                indices[i].resize(n);
                dists[i].resize(n);
                if (n>0) {
                    resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                    indices_to_ids(&indices[i][0], &indices[i][0], n);
                }
                count += n;
            }
        }
    }
    return count;
}

/**
 *
 * @param queries
 * @param indices
 * @param dists
 * @param knn
 * @param params
 * @return
 */
int knnSearch(const Matrix<ElementType>&
queries,
              std::vector< std::vector<int> >& indices,
              std::vector<std::vector<DistanceType> >& dists,
              size_t knn,
              const SearchParams& params) const
{
    // int variant: search with size_t rows, then copy-convert each row.
    std::vector<std::vector<size_t> > indices_;
    int result = knnSearch(queries, indices_, dists, knn, params);
    indices.resize(indices_.size());
    for (size_t i=0;i<indices_.size();++i) {
        indices[i].assign(indices_[i].begin(), indices_[i].end());
    }
    return result;
}

/**
 * @brief Perform radius search
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found
 */
// Matrix-output radius search. Three strategies depending on params:
//   max_neighbors == 0 : only count matches, write nothing;
//   max_neighbors  < 0 with room for the whole index : unbounded result set;
//   otherwise          : bounded (KNN-style) radius result set.
int radiusSearch(const Matrix<ElementType>& queries,
                 Matrix<size_t>& indices,
                 Matrix<DistanceType>& dists,
                 float radius,
                 const SearchParams& params) const
{
    assert(queries.cols == veclen());
    int count = 0;
    size_t num_neighbors = std::min(indices.cols, dists.cols);
    int max_neighbors = params.max_neighbors;
    if (max_neighbors<0) max_neighbors = num_neighbors;
    else max_neighbors = std::min(max_neighbors,(int)num_neighbors);

    if (max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
        {
            CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                count += resultSet.size();
            }
        }
    }
    else {
        // explicitly indicated to use unbounded radius result set
        // and we know there'll be enough room for resulting indices and dists
        if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
            {
                RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if (n>num_neighbors) n = num_neighbors;
                    resultSet.copy(indices[i], dists[i], n, params.sorted);

                    // mark the next element in the output buffers as unused
                    if (n<indices.cols) indices[i][n] = size_t(-1);
                    if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                    indices_to_ids(indices[i], indices[i], n);
                }
            }
        }
        else {
            // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
            {
                KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if ((int)n>max_neighbors) n = max_neighbors;
                    resultSet.copy(indices[i], dists[i], n, params.sorted);

                    // mark the next element in the output buffers as unused
                    if (n<indices.cols) indices[i][n] = size_t(-1);
                    if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                    indices_to_ids(indices[i], indices[i], n);
                }
            }
        }
    }
    return count;
}

/**
 *
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 * @param params
 * @return
 */
// int-matrix wrapper: search into a temporary size_t matrix, then narrow.
int radiusSearch(const Matrix<ElementType>& queries,
                 Matrix<int>& indices,
                 Matrix<DistanceType>& dists,
                 float radius,
                 const SearchParams& params) const
{
    flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
    int result = radiusSearch(queries, indices_, dists, radius, params);
    for (size_t i=0;i<indices.rows;++i) {
        for (size_t j=0;j<indices.cols;++j) {
            indices[i][j] = indices_[i][j];
        }
    }
    delete[] indices_.ptr();
    return result;
}

/**
 * @brief Perform radius search
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found
 */
int radiusSearch(const
Matrix<ElementType>& queries,
                 std::vector< std::vector<size_t> >& indices,
                 std::vector<std::vector<DistanceType> >& dists,
                 float radius,
                 const SearchParams& params) const
{
    assert(queries.cols == veclen());
    int count = 0;

    // just count neighbors
    if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
        {
            CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                count += resultSet.size();
            }
        }
    }
    else {
        // Grow the outer containers to one (exactly-sized) row per query.
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        if (params.max_neighbors<0) {
            // search for all neighbors
#pragma omp parallel num_threads(params.cores)
            {
                RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                }
            }
        }
        else {
            // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
            {
                KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if ((int)n>params.max_neighbors) n = params.max_neighbors;
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                }
            }
        }
    }
    return count;
}

/**
 *
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 * @param params
 * @return
 */
int
radiusSearch(const Matrix<ElementType>& queries,
             std::vector< std::vector<int> >& indices,
             std::vector<std::vector<DistanceType> >& dists,
             float radius,
             const SearchParams& params) const
{
    // int variant: search with size_t rows, then copy-convert each row.
    std::vector<std::vector<size_t> > indices_;
    int result = radiusSearch(queries, indices_, dists, radius, params);
    indices.resize(indices_.size());
    for (size_t i=0;i<indices_.size();++i) {
        indices[i].assign(indices_[i].begin(), indices_[i].end());
    }
    return result;
}

// Core single-query search, implemented by each concrete index type.
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;

protected:

virtual void freeIndex() = 0;

virtual void buildIndexImpl() = 0;

// Map an external point id back to its internal point index; returns
// size_t(-1) when the id is not present. Fast path: if no points were
// removed (ids_ empty, or ids_[id]==id) the id equals the index.
// NOTE(review): the binary search assumes ids_ is sorted ascending, which
// holds because ids are assigned sequentially and compaction preserves order.
size_t id_to_index(size_t id)
{
    if (ids_.size()==0) {
        return id;
    }
    size_t point_index = size_t(-1);
    if (id < ids_.size() && ids_[id]==id) {
        return id;
    }
    else {
        // binary search
        size_t start = 0;
        size_t end = ids_.size();
        while (start<end) {
            size_t mid = (start+end)/2;
            if (ids_[mid]==id) {
                point_index = mid;
                break;
            }
            else if (ids_[mid]<id) {
                start = mid + 1;
            }
            else {
                end = mid;
            }
        }
    }
    return point_index;
}

// Rewrite internal indices as external ids (in == out aliasing is allowed).
// A no-op unless points have been removed, since then index == id.
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
    if (removed_) {
        for (size_t i=0;i<size;++i) {
            out[i] = ids_[in[i]];
        }
    }
}

// Reset the index to reference `dataset` (row pointers are stored, the
// element data itself is NOT copied) and clear all removal bookkeeping.
void setDataset(const Matrix<ElementType>& dataset)
{
    size_ = dataset.rows;
    veclen_ = dataset.cols;
    last_id_ = 0;
    ids_.clear();
    removed_points_.clear();
    removed_ = false;
    removed_count_ = 0;

    points_.resize(size_);
    for (size_t i=0;i<size_;++i) {
        points_[i] = dataset[i];
    }
}

// Append rows of `new_points`, assigning fresh sequential ids when the
// removal machinery is active.
void extendDataset(const Matrix<ElementType>& new_points)
{
    size_t new_size = size_ + new_points.rows;
    if (removed_) {
        removed_points_.resize(new_size);
        ids_.resize(new_size);
    }
    points_.resize(new_size);
    for (size_t i=size_;i<new_size;++i) {
        points_[i] = new_points[i-size_];
        if (removed_) {
            ids_[i] = last_id_++;
            removed_points_.reset(i);
        }
    }
    size_ = new_size;
}

// Compact points_/ids_ by dropping entries flagged in removed_points_.
// Note: removed_ itself stays true so the id/index indirection remains active.
void cleanRemovedPoints()
{
    if (!removed_) return;

    size_t last_idx = 0;
    for (size_t i=0;i<size_;++i) {
        if (!removed_points_.test(i)) {
            points_[last_idx] = points_[i];
            ids_[last_idx] = ids_[i];
            removed_points_.reset(last_idx);
            ++last_idx;
        }
    }
    points_.resize(last_idx);
    ids_.resize(last_idx);
    removed_points_.resize(last_idx);
    size_ = last_idx;
    removed_count_ = 0;
}

// Exchange the complete state of two indexes (used by copy-and-swap style code).
void swap(NNIndex& other)
{
    std::swap(distance_, other.distance_);
    std::swap(last_id_, other.last_id_);
    std::swap(size_, other.size_);
    std::swap(size_at_build_, other.size_at_build_);
    std::swap(veclen_, other.veclen_);
    std::swap(index_params_, other.index_params_);
    std::swap(removed_, other.removed_);
    std::swap(removed_points_, other.removed_points_);
    std::swap(removed_count_, other.removed_count_);
    std::swap(ids_, other.ids_);
    std::swap(points_, other.points_);
    std::swap(data_ptr_, other.data_ptr_);
}

protected:

/**
 * The distance functor
 */
Distance distance_;

/**
 * Each index point has an associated ID. IDs are assigned sequentially in
 * increasing order. This indicates the ID assigned to the last point added to the
 * index.
 */
size_t last_id_;

/**
 * Number of points in the index (and database)
 */
size_t size_;

/**
 * Number of features in the dataset when the index was last built.
 */
size_t size_at_build_;

/**
 * Size of one point in the index (and database)
 */
size_t veclen_;

/**
 * Parameters of the index.
 */
IndexParams index_params_;

/**
 * Flag indicating if at least a point was removed from the index
 */
bool removed_;

/**
 * Array used to mark points removed from the index
 */
DynamicBitset removed_points_;

/**
 * Number of points removed from the index
 */
size_t removed_count_;

/**
 * Array of point IDs, returned by nearest-neighbour operations
 */
std::vector<size_t> ids_;

/**
 * Point data
 */
std::vector<ElementType*> points_;

/**
 * Pointer to dataset memory if allocated by this index, otherwise NULL
 */
ElementType* data_ptr_;

};

// Pulls the dependent base-class members into derived template classes.
#define USING_BASECLASS_SYMBOLS \
    using NNIndex<Distance>::distance_;\
    using NNIndex<Distance>::size_;\
    using NNIndex<Distance>::size_at_build_;\
    using NNIndex<Distance>::veclen_;\
    using NNIndex<Distance>::index_params_;\
    using NNIndex<Distance>::removed_points_;\
    using NNIndex<Distance>::ids_;\
    using NNIndex<Distance>::removed_;\
    using NNIndex<Distance>::points_;\
    using NNIndex<Distance>::extendDataset;\
    using NNIndex<Distance>::setDataset;\
    using NNIndex<Distance>::cleanRemovedPoints;\
    using NNIndex<Distance>::indices_to_ids;

}

#endif //FLANN_NNINDEX_H
resample.h
#ifndef RESAMPLE_H_
#define RESAMPLE_H_

#include <omp.h>
#include <torch/extension.h>

#include "nn/common/resample.h"

namespace mapped_conv {
namespace nn {
namespace cpu {

// CPU dispatcher: applies the per-element common::ResampleToMap2D kernel to
// `num_kernels` elements in an OpenMP-parallel loop (forward resampling,
// input -> mapped output).
// NOTE(review): raw data<T>() pointers imply contiguous tensors of dtype T —
// confirm callers guarantee this.
template <typename T>
void ResampleToMap2D(const int64_t num_kernels, torch::Tensor data_in,
                     torch::Tensor sample_map, const int64_t channels,
                     const int64_t in_height, const int64_t in_width,
                     const int64_t out_height, const int64_t out_width,
                     const int64_t interpolation, torch::Tensor data_out) {
  const T *data_in_ptr    = data_in.data<T>();
  const T *sample_map_ptr = sample_map.data<T>();
  T *data_out_ptr         = data_out.data<T>();

  int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
                                data_out_ptr) private(index) schedule(static)
  for (index = 0; index < num_kernels; index++) {
    common::ResampleToMap2D(index, data_in_ptr, sample_map_ptr, channels,
                            in_height, in_width, out_height, out_width,
                            interpolation, data_out_ptr);
  }
}

// Reverse direction of ResampleToMap2D: writes into data_in (used for the
// backward pass — data_out is read, data_in is accumulated).
template <typename T>
void ResampleFromMap2D(const int64_t num_kernels, torch::Tensor data_out,
                       torch::Tensor sample_map, const int64_t channels,
                       const int64_t in_height, const int64_t in_width,
                       const int64_t out_height, const int64_t out_width,
                       const int64_t interpolation, torch::Tensor data_in) {
  const T *data_out_ptr   = data_out.data<T>();
  const T *sample_map_ptr = sample_map.data<T>();
  T *data_in_ptr          = data_in.data<T>();

  int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
                                data_out_ptr) private(index) schedule(static)
  for (index = 0; index < num_kernels; index++) {
    common::ResampleFromMap2D(index, data_out_ptr, sample_map_ptr, channels,
                              in_height, in_width, out_height, out_width,
                              interpolation, data_in_ptr);
  }
}

// --------------------------------------------
// --------------------------------------------

// Weighted variant: each sample blends num_interp_pts map locations using
// the provided interpolation weights.
template <typename T>
void ResampleToMap2DWeighted(
    const int64_t num_kernels, torch::Tensor data_in,
    torch::Tensor sample_map, torch::Tensor interp_weights,
    const int64_t channels, const int64_t in_height, const int64_t in_width,
    const int64_t out_height, const int64_t out_width,
    const int64_t interpolation, const int64_t num_interp_pts,
    torch::Tensor data_out) {
  const T *data_in_ptr        = data_in.data<T>();
  const T *sample_map_ptr     = sample_map.data<T>();
  const T *interp_weights_ptr = interp_weights.data<T>();
  T *data_out_ptr             = data_out.data<T>();

  int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
                                interp_weights_ptr, \
                                data_out_ptr) private(index) schedule(static)
  for (index = 0; index < num_kernels; index++) {
    common::ResampleToMap2DWeighted(
        index, data_in_ptr, sample_map_ptr, interp_weights_ptr, channels,
        in_height, in_width, out_height, out_width, interpolation,
        num_interp_pts, data_out_ptr);
  }
}

// Weighted reverse direction: reads data_out, writes data_in (backward pass).
template <typename T>
void ResampleFromMap2DWeighted(
    const int64_t num_kernels, torch::Tensor data_out,
    torch::Tensor sample_map, torch::Tensor interp_weights,
    const int64_t channels, const int64_t in_height, const int64_t in_width,
    const int64_t out_height, const int64_t out_width,
    const int64_t interpolation, const int64_t num_interp_pts,
    torch::Tensor data_in) {
  const T *data_out_ptr       = data_out.data<T>();
  const T *sample_map_ptr     = sample_map.data<T>();
  const T *interp_weights_ptr = interp_weights.data<T>();
  T *data_in_ptr              = data_in.data<T>();

  int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
                                interp_weights_ptr, \
                                data_out_ptr) private(index) schedule(static)
  for (index = 0; index < num_kernels; index++) {
    common::ResampleFromMap2DWeighted(
        index, data_out_ptr, sample_map_ptr, interp_weights_ptr, channels,
        in_height, in_width, out_height, out_width, interpolation,
        num_interp_pts, data_in_ptr);
  }
}

// --------------------------------------------
// --------------------------------------------

// Voting variant: int64 label data resampled by majority vote over
// numCandidates candidates (no interpolation of values).
template <typename T>
void ResampleToMap2DVoting(const int64_t num_kernels, torch::Tensor data_in,
                           torch::Tensor sample_map, const int64_t channels,
                           const int64_t in_height, const int64_t in_width,
                           const int64_t out_height, const int64_t out_width,
                           const
int64_t numCandidates, torch::Tensor data_out) {
  // Voting operates on integer labels regardless of template parameter T.
  const int64_t *data_in_ptr    = data_in.data<int64_t>();
  const int64_t *sample_map_ptr = sample_map.data<int64_t>();
  int64_t *data_out_ptr         = data_out.data<int64_t>();

  int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
                                data_out_ptr) private(index) schedule(static)
  for (index = 0; index < num_kernels; index++) {
    common::ResampleToMap2DVoting(index, data_in_ptr, sample_map_ptr,
                                  channels, in_height, in_width, out_height,
                                  out_width, numCandidates, data_out_ptr);
  }
}

}  // namespace cpu
}  // namespace nn
}  // namespace mapped_conv

#endif
strip_fmt_plug.c
/* STRIP cracker patch for JtR. Hacked together during September of
 * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_strip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_strip);
#else

#include "aes.h"
#include <string.h>
#include "stdint.h"
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "STRIP"
#define FORMAT_NAME "Password Manager"
#define FORMAT_TAG "$strip$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* PBKDF2-HMAC-SHA1 iteration count used to derive the AES key (see crypt_all). */
#define ITERATIONS 4000
#define FILE_HEADER_SZ 16
#define SQLITE_FILE_HEADER "SQLite format 3"
#define HMAC_SALT_MASK 0x3a
#define FAST_PBKDF2_ITER 2
#define SQLITE_MAX_PAGE_SIZE 65536

/* Self-test vectors: "$strip$*" followed by 2048 hex chars (16-byte salt +
 * remainder of the first encrypted database page). */
static struct fmt_tests strip_tests[] = {
	/* test vector created by STRIP for Windows */
{"$strip$*66cd7a4ff7716f7b86cf587ce18eb39518e096eb152615ada8d007d9f035c20c711e62cbde96d8c3aad2a4658497a6119addc97ed3c970580cd666f301c63ce041a1748ee5c3861ada3cd6ee75b5d68891f731b3c2e3294b08e10ce3c23c2bfac158f8c45d0332791f64d1e3ad55e936d17a42fef5228e713b8188050c9a61c7f026af6203172cf2fc54c8b439e2260d7a00a4156713f92f8466de5c05cd8701e0d3d9cb3f392ae918e6900d5363886d4e1ed7e90da76b180ef9555c1cd358f6d1ee3755a208fee4d5aa1c776a0888200b21a3da6614d5fe2303e78c09563d862d19deecdc9f0ec7fbc015689a74f4eb477d9f22298b1b3f866ca4cb772d74821a1f8d03fd5fd0d020ffd41dd449b431ddf3bbfba3399311d9827be428202ee56e2c2a4e91f3415b4282c691f16cd447cf877b576ab963ea4ea3dc7d8c433febdc36607fd2372c4165abb59e3e75c28142f1f2575ecca6d97a9f782c3410151f8bbcbc65a42fdc59fdc4ecd8214a2bbd3a4562fac21c48f7fc69a4ecbcf664b4e435d7734fde5494e4d80019a0302e22565ed6a49b29cecf81077fd92f0105d18a421e04ee0deaca6389214abc7182db7003da7e267816531010b236eadfea20509718ff743ed5ad2828b6501dd84a371feed26f0514bbda69118a69048ebb71e3e2c54fb918422f1320724a353fe8d81a562197454d2c67443be8a4008a756aec0998386a5fd48e379befe966b42dfa6684ff049a61b51de5f874a12ab7d9ab33dc84738e036e294c22a07bebcc95be9999ab988a1fa1c944ab95be970045accb661249be8cc34fcc0680cb1aff8dfee21f586c571b1d09bf370c6fc131418201e0414acb2e4005b0b6fda1f3d73b7865823a008d1d3f45492a960dbdd6331d78d9e2e6a368f08ee3456b6d78df1d5630f825c536fff60bad23fb164d151d80a03b0c78edbfdee5c7183d7527e289428cf554ad05c9d75011f6b233744f12cd85fbb62f5d1ae22f43946f24a483a64377bf3fa16bf32cea1ab4363ef36206a5989e97ff847e5d645791571b9ecd1db194119b7663897b9175dd9cc123bcc7192eaf56d4a2779c502700e88c5c20b962943084bcdf024dc4f19ca649a860bdbd8f8f9b4a9d03027ae80f4a3168fc030859acb08a871950b024d27306cdc1a408b2b3799bb8c1f4b6ac3593aab42c962c979cd9e6f59d029f8d392315830cfcf4066bf03e0fc5c0f3630e9c796ddb38f51a2992b0a61d6ef115cb34d36c7d94b6c9d49dfe8d064d92b483f12c14fa10bf1170a575e4571836cef0a1fbf9f8b6968abda5e964bb16fd62fde1d1df0f5ee9c68ce568014f46f1717b6cd948b0da9a6f4128da338960dbbcbc9c9c3b486859c06e5e2338db3458646054ccd59bb940c7fc60
cda34f633c26dde83bb717b75fefcbd09163f147d59a6524752a47cd94", "openwall"}, /* test vector created by STRIP Password Manager (for Android) */ {"$strip$*78adb0052203efa1bd1b02cac098cc9af1bf7e84ee2eaebaaba156bdcfe729ab12ee7ba8a84e79d11dbd67eee82bcb24be99dbd5db7f4c3a62f188ce4b48edf4ebf6cbf5a5869a61f83fbdb3cb4bf79b3c2c898f422d71eab31afdf3a8d4e97204dedbe7bd8b5e4c891f4880ca917c8b2f67ca06035e7f8db1fae91c45db6a08adf96ec5ddcb9e60b648acf883a7550ea5b67e2d27623e8de315f29cba48b8b1d1bde62283615ab88293b29ad73ae404a42b13e35a95770a504d81e335c00328a6290e411fa2708a697fab7c2d17ff5d0a3fe508118bb43c3d5e72ef563e0ffd337f559085a1373651ca2b8444f4437d8ac0c19aa0a24b248d1d283062afbc3b4ccc9b1861f59518eba771f1d9707affe0222ff946da7c014265ab4ba1f6417dd22d92e4adf5b7e462588f0a42e061a3dad041cbb312d8862aed3cf490df50b710a695517b0c8771a01f82db09231d392d825f5667012e349d2ed787edf8448bbb1ff548bee3a33392cd209e8b6c1de8202f6527d354c3858b5e93790c4807a8967b4c0321ed3a1d09280921650ac33308bd04f35fb72d12ff64a05300053358c5d018a62841290f600f7df0a7371b6fac9b41133e2509cb90f774d02e7202185b9641d063ed38535afb81590bfd5ad9a90107e4ff6d097ac8f35435f307a727f5021f190fc157956414bfce4818a1e5c6af187485683498dcc1d56c074c534a99125c6cfbf5242087c6b0ae10971b0ff6114a93616e1a346a22fcac4c8f6e5c4a19f049bbc7a02d2a31d39548f12440c36dbb253299a11b630e8fd88e7bfe58545d60dce5e8566a0a190d816cb775bd859b8623a7b076bce82c52e9cff6a2d221f9d3fd888ac30c7e3000ba8ed326881ffe911e27bb8982b56caa9a12065721269976517d2862e4a486b7ed143ee42c6566bba04c41c3371220f4843f26e328c33a5fb8450dadc466202ffc5c49cc95827916771e49e0602c3f8468537a81cf2fa1db34c090fccab6254436c05657cf29c3c415bb22a42adeac7870858bf96039b81c42c3d772509fdbe9a94eaf99ee9c59bac3ea97da31e9feac14ed53a0af5c5ebd2e81e40a5140da4f8a44048d5f414b0ba9bfb8024c7abaf5346fde6368162a045d1196f81d55ed746cc6cbd7a7c9cdbfa392279169626437da15a62730c2990772e106a5b84a60edaa6c5b8030e1840aa6361f39a12121a1e33b9e63fb2867d6241de1fb6e2cd1bd9a78c7122258d052ea53a4bff4e097ed49fc17b9ec196780f4c6506e74a5abb10c2545e6f7608d2eefad179d54ad31034576
be517affeb3964c65562538dd6ea7566a52c75e4df593895539609a44097cb6d31f438e8f7717ce2bf777c76c22d60b15affeb89f08084e8f316be3f4aefa4fba8ec2cc1dc845c7affbc0ce5ebccdbfde5ebab080a285f02bdfb76c6dbd243e5ee1e5d", "p@$$w0rD"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;                 /* per-candidate crack flags */

static struct custom_salt {
	unsigned char salt[16];      /* PBKDF2 salt (first 16 bytes of blob) */
	unsigned char data[1024];    /* encrypted page bytes, blob-offset aligned */
} *cur_salt;

/* Allocate per-candidate buffers; under OpenMP the key ranges are scaled
 * by thread count (without OpenMP, max_keys_per_crypt stays at its default). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Accept only "$strip$*" + exactly 2048 hex characters. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$strip$" and first '*' */
	if ((p = strtokm(ctcopy, "*")) == NULL) /* salt + data */
		goto err;
	if (hexlenl(p, &extra) != 2048 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the hex blob. The salt occupies bytes 0..15; data[] is filled from
 * byte 16 onward AT the same offsets (data[0..15] stay zero), so offsets
 * into data[] match offsets into the original page blob. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$strip$" and first '*' */
	p = strtokm(ctcopy, "*");
	for (i = 0; i < 16; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	for (; i < 1024; i++)
		cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* verify validity of page */
/* Sanity-check a decrypted SQLite page 1 against the file-format header
 * rules. Returns 0 when the page looks valid, -1 otherwise. */
static int verify_page(unsigned char *page1)
{
	uint32_t pageSize;
	uint32_t usableSize;
	if (memcmp(page1, SQLITE_FILE_HEADER, 16) != 0) {
		return -1;
	}
	if (page1[19] > 2) {
		return -1;
	}
	if (memcmp(&page1[21], "\100\040\040", 3) != 0) {
		return -1;
	}
	/* SQLite encoding: big-endian 16-bit at offset 16, where the stored
	 * value 0x0001 means 65536 — hence the (page1[17] << 16) term. */
	pageSize = (page1[16] << 8) | (page1[17] << 16);
	if (((pageSize - 1) & pageSize) != 0 || pageSize > SQLITE_MAX_PAGE_SIZE || pageSize <= 256) {
		return -1;
	}
	if ((pageSize & 7) != 0) {
		return -1;
	}
	usableSize = pageSize - page1[20];
	if (usableSize < 480) {
		return -1;
	}
	return 0;
}

/* Derive an AES-256 key per candidate via PBKDF2-SHA1, decrypt the start of
 * page 1 (CBC, IV taken from the page tail), and accept when verify_page
 * recognizes a valid SQLite header.
 * NOTE(review): the for-loop sits inside the #ifdef — without OpenMP the
 * block runs once with index 0, which is sufficient because init() only
 * scales max_keys_per_crypt under _OPENMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		unsigned char output[1024];
		unsigned char *iv_in;
		unsigned char iv_out[16];
		int size,i;
		int page_sz = 1008; /* 1024 - strlen(SQLITE_FILE_HEADER) */
		int reserve_sz = 16; /* for HMAC off case */
		AES_KEY akey;
#ifdef SIMD_COEF_32
		int len[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, ITERATIONS, pout, 32, 0);
#else
		pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, 16, ITERATIONS, master[0], 32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ);
			size = page_sz - reserve_sz;
			iv_in = cur_salt->data + size + 16;
			memcpy(iv_out, iv_in, 16);
			if (AES_set_decrypt_key(master[i], 256, &akey) < 0) {
				fprintf(stderr, "AES_set_decrypt_key failed!\n");
			}
			/* decrypting 24 bytes is enough */
			AES_cbc_encrypt(cur_salt->data + 16, output + 16, 24, &akey, iv_out, AES_DECRYPT);
			if (verify_page(output) == 0) {
				cracked[index+i] = 1;
			}
			else
				cracked[index+i] = 0;
		}
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;
	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* No binary to compare against — crypt_all already fully verified. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void strip_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_strip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		strip_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		strip_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
openmp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Minimal OpenMP smoke test: every thread in a parallel region announces
 * its id and the team size. Defaults to 4 threads when the user has not
 * set OMP_NUM_THREADS. */
int main(int argc, char *argv[])
{
    /* Respect an explicit OMP_NUM_THREADS; otherwise pick four threads. */
    if (getenv("OMP_NUM_THREADS") == NULL) {
        omp_set_num_threads(4);
    }

    /* Serial fallback values, used only when _OPENMP is not defined. */
    int thread_id = 0;
    int team_size = 1;

#pragma omp parallel default(shared) private(thread_id, team_size)
    {
#if defined(_OPENMP)
        team_size = omp_get_num_threads();
        thread_id = omp_get_thread_num();
#endif
        printf("Hello from thread %d out of %d\n", thread_id, team_size);
    }

    return 0;
}
GB_unaryop__lnot_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): change the generator template, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int32_fp64
// op(A') function: GB_tran__lnot_int32_fp64

// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z ; GB_CAST_SIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of Ax, parallelized with OpenMP.
GrB_Info GB_unop__lnot_int32_fp64
(
    int32_t *Cx,            // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is provided by the shared template
// GB_unaryop_transpose.c, specialized through the macros defined above.
GrB_Info GB_tran__lnot_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
trmm_x_csr_u_lo_col.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"

/*
 * Triangular matrix-matrix multiply, CSR storage, unit diagonal, strictly
 * lower triangle, column-major dense operands:
 *
 *     y := alpha * (L + I) * x + beta * y
 *
 * where only entries with column < row of `mat` contribute to L. Columns of
 * the dense operands are processed in parallel.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        for (ALPHA_INT row = 0; row < mat->rows; ++row)
        {
            /* y := beta*y + alpha*x  (the unit-diagonal contribution). */
            alpha_mule(y[index2(col, row, ldy)], beta);
            alpha_madde(y[index2(col, row, ldy)], alpha, x[index2(col, row, ldx)]);

            /* Accumulate the strictly-lower-triangular part of this row. */
            ALPHA_Number acc;
            alpha_setzero(acc);
            for (ALPHA_INT nz = mat->rows_start[row]; nz < mat->rows_end[row]; ++nz)
            {
                ALPHA_INT a_col = mat->col_indx[nz];
                if (a_col < row)
                {
                    alpha_madde(acc, mat->values[nz], x[index2(col, a_col, ldx)]);
                }
            }
            alpha_madde(y[index2(col, row, ldy)], alpha, acc);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_unop__identity_bool_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel; change the generator template, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_uint16)
// op(A') function:  GB (_unop_tran__identity_bool_uint16)

// C type:   bool
// A type:   uint16_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity
#define GB_OP(z, x) \
    z = x ;

// casting: uint16_t -> bool
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    uint16_t aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */  \
    bool z = (bool) aij ;  \
    Cx [pC] = z ;  \
}

// true if operator is the identity op with no typecasting
// (false here: the uint16_t -> bool cast is a real conversion, so the
// memcpy fast path below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (bool) Ax [p] for all entries, in parallel.  When A is
// bitmap (Ab != NULL), entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__identity_bool_uint16)
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse/hypersparse: every entry in Ax is valid
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body is the shared template GB_unop_transpose.c, specialized
// by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_bool_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
single_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s

// Clang '-verify' diagnostics test for the '#pragma omp single' directive.
// The expected-error/expected-warning/expected-note comments are line-relative
// (@+N) assertions checked by the compiler itself: do NOT insert, delete, or
// move lines between a directive comment and its target pragma, or the test
// will fail.

void foo();

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single foo

void test_no_clause() {
  int i;
#pragma omp single
  foo();

#pragma omp single
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp single
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single foo bar
  foo();
}

void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single;
  foo();

#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single linear(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single private(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single, private(x);
  foo();
}

void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp single private(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(,
  foo();

#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(, )
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private()
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private(int)
  foo();

#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single private(0)
  foo();

  int x, y, z;
#pragma omp parallel
#pragma omp single private(x)
  foo();

#pragma omp parallel
#pragma omp single private(x, y)
  foo();

#pragma omp parallel
#pragma omp single private(x, y, z)
  foo();
}

void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(,
  foo();

#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(, )
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate()
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(int)
  foo();

#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single firstprivate(0)
  foo();
}

void test_nowait() {
#pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}}
  for (int i = 0; i < 16; ++i)
    ;
}
Par-05-LoopOmpParallelFor.c
/*
 * Test driver: a guard loop precedes an OpenMP-parallelized scaling loop.
 * The guard loop's first iteration (k = 0, and 0 < 2) returns -1 at once,
 * so the parallel loop below is never executed; this reproduces the
 * original control flow exactly.
 */
int main(int argc, char **argv)
{
    int data[4] = {1, 2, 3, 4};

    /* Single-iteration guard loop that bails out immediately. */
    for (int k = 0; k < 1; ++k) {
        if (k < 2)
            return -1;
    }

    /* Triple every element in parallel (unreachable given the guard). */
#pragma omp parallel for
    for (int k = 0; k < 4; ++k)
        data[k] *= 3;

    return 0;
}
mcf_openmesh.h
#pragma once
#include "../common/openmesh_report.h"
#include "../common/openmesh_trimesh.h"
#include "mcf_util.h"
#include "rxmesh/util/timer.h"
#include "rxmesh/util/vector.h"

/**
 * axpy3()
 * Fused scale-and-add over N x 3 coordinate arrays: Y = beta*Y + alpha*X,
 * applied component-wise to each 3-vector.  Rows are independent and
 * processed in parallel with num_omp_threads OpenMP threads.
 */
template <typename T>
void axpy3(const std::vector<std::vector<T>>& X,
           const T                            alpha,
           const T                            beta,
           std::vector<std::vector<T>>&       Y,
           const int                          num_omp_threads)
{
    // Y = beta*Y + alpha*X
    int size = static_cast<int>(X.size());
#pragma omp parallel for schedule(static) num_threads(num_omp_threads)
    for (int i = 0; i < size; ++i) {
        Y[i][0] *= beta;
        Y[i][1] *= beta;
        Y[i][2] *= beta;

        Y[i][0] += alpha * X[i][0];
        Y[i][1] += alpha * X[i][1];
        Y[i][2] += alpha * X[i][2];
    }
}

/**
 * dot3()
 * Inner product of two N x 3 coordinate arrays, summing all components of
 * all rows into one scalar.  Parallelized with an OpenMP sum reduction.
 */
template <typename T>
T dot3(const std::vector<std::vector<T>>& A,
       const std::vector<std::vector<T>>& B,
       const int                          num_omp_threads)
{
    T   ret  = 0;
    int size = static_cast<int>(A.size());
#pragma omp parallel for schedule(static) num_threads(num_omp_threads) \
    reduction(+ : ret)
    for (int i = 0; i < size; ++i) {
        T partial = 0;
        for (size_t j = 0; j < A[i].size(); ++j) {
            partial += A[i][j] * B[i][j];
        }
        ret += partial;
    }
    return ret;
}

/**
 * partial_voronoi_area()
 * Fetch the three vertex positions for ids (p, q, r) from the mesh and
 * forward to the geometric partial_voronoi_area() overload (mcf_util.h).
 * p is the center vertex; (p, q, r) is assumed to be a ccw-oriented
 * triangle of p's one-ring.
 */
template <typename T>
T partial_voronoi_area(const int      p_id,  // center
                       const int      q_id,  // before center
                       const int      r_id,  // after center
                       const TriMesh& mesh)
{
    // compute partial Voronoi area of the center vertex that is associated with
    // the triangle p->q->r (oriented ccw)
    TriMesh::VertexIter p_it = mesh.vertices_begin() + p_id;
    TriMesh::VertexIter q_it = mesh.vertices_begin() + q_id;
    TriMesh::VertexIter r_it = mesh.vertices_begin() + r_id;

    assert((*p_it).idx() == p_id);
    assert((*q_it).idx() == q_id);
    assert((*r_it).idx() == r_id);

    const rxmesh::Vector<3, T> p(
        mesh.point(*p_it)[0], mesh.point(*p_it)[1], mesh.point(*p_it)[2]);
    const rxmesh::Vector<3, T> q(
        mesh.point(*q_it)[0], mesh.point(*q_it)[1], mesh.point(*q_it)[2]);
    const rxmesh::Vector<3, T> r(
        mesh.point(*r_it)[0], mesh.point(*r_it)[1], mesh.point(*r_it)[2]);

    return partial_voronoi_area(p, q, r);
}

/**
 * edge_cotan_weight()
 * Fetch the four vertex positions of the diamond around edge p-r (q and s
 * are the two opposite vertices) and forward to the geometric
 * edge_cotan_weight() overload (mcf_util.h).
 */
template <typename T>
T edge_cotan_weight(const int      p_id,
                    const int      r_id,
                    const int      q_id,
                    const int      s_id,
                    const TriMesh& mesh)
{
    // Get the edge weight between the two verteices p-r where
    // q and s composes the diamond around p-r
    TriMesh::VertexIter p_it = mesh.vertices_begin() + p_id;
    TriMesh::VertexIter r_it = mesh.vertices_begin() + r_id;
    TriMesh::VertexIter q_it = mesh.vertices_begin() + q_id;
    TriMesh::VertexIter s_it = mesh.vertices_begin() + s_id;

    const rxmesh::Vector<3, T> p(
        mesh.point(*p_it)[0], mesh.point(*p_it)[1], mesh.point(*p_it)[2]);
    const rxmesh::Vector<3, T> r(
        mesh.point(*r_it)[0], mesh.point(*r_it)[1], mesh.point(*r_it)[2]);
    const rxmesh::Vector<3, T> q(
        mesh.point(*q_it)[0], mesh.point(*q_it)[1], mesh.point(*q_it)[2]);
    const rxmesh::Vector<3, T> s(
        mesh.point(*s_it)[0], mesh.point(*s_it)[1], mesh.point(*s_it)[2]);

    return edge_cotan_weight(p, r, q, s);
}

/**
 * mcf_matvec()
 * Applies the implicit-MCF system matrix to 'in', writing the result to
 * 'out'.  One parallel iteration per vertex; the weights come either from
 * uniform Laplace (Arg.use_uniform_laplace) or clamped cotan weights.
 */
template <typename T>
void mcf_matvec(TriMesh&                           mesh,
                const std::vector<std::vector<T>>& in,
                std::vector<std::vector<T>>&       out,
                const int                          num_omp_threads)
{
    // Matrix vector multiplication operation based on uniform Laplacian weight
    // defined in Equation 7 in Implicit Fairing of Irregular Meshes using
    // Diffusion and Curvature Flow paper

    // Ideally we should compute the vertex weight first in one loop over the
    // one-ring and then do another loop to do the matvect operation. We choose
    // to optimize this by saving one loop and incrementally compute the vertex
    // weight. Note the vertex weight in case of uniform Laplace is the valence
    // inversed, otherwise it is 0.5/voronoi_area. We build this voronoi_area
    // incrementally which makes the code looks a bit ugly.

    // To compute the vertex cotan weight, we use the following configuration
    // where P is the center vertex we want to compute vertex weight for.
    // Looping over P's one ring should gives q->r->s.

    /*
            r
          / | \
         /  |  \
        s   |   q
         \  |  /
          \ | /
            p
    */
#pragma omp parallel for schedule(static) num_threads(num_omp_threads)
    for (int p_id = 0; p_id < int(mesh.n_vertices()); ++p_id) {

        TriMesh::VertexIter p_iter = mesh.vertices_begin() + p_id;

        // Off-diagonal entries
        rxmesh::Vector<3, T> x(T(0));

        T sum_e_weight(0);

        // vertex weight
        T v_weight(0);

        // The last vertex in the one ring
        // NOTE: q_iter and s_iter lag/lead r_iter by exactly one position;
        // the cotan branch advances them in lock-step with r_iter, so the
        // increment order below must not be changed.
        TriMesh::VertexVertexIter q_iter = mesh.vv_iter(*p_iter);
        --q_iter;
        assert(q_iter.is_valid());

        // the second vertex in the one ring
        TriMesh::VertexVertexIter s_iter = mesh.vv_iter(*p_iter);
        ++s_iter;
        assert(s_iter.is_valid());

        for (TriMesh::VertexVertexIter r_iter = mesh.vv_iter(*p_iter);
             r_iter.is_valid();
             ++r_iter) {

            int r_id = (*r_iter).idx();

            T e_weight = 0;
            if (Arg.use_uniform_laplace) {
                e_weight = 1;
            } else {
                // cotan weight clamped at zero to avoid negative weights
                e_weight = std::max(
                    T(0.0),
                    edge_cotan_weight<T>(
                        p_id, r_id, (*q_iter).idx(), (*s_iter).idx(), mesh));
                ++s_iter;
            }

            e_weight *= static_cast<T>(Arg.time_step);
            sum_e_weight += e_weight;

            x[0] -= e_weight * in[r_id][0];
            x[1] -= e_weight * in[r_id][1];
            x[2] -= e_weight * in[r_id][2];

            if (Arg.use_uniform_laplace) {
                // uniform Laplace: accumulate valence
                ++v_weight;
            } else {
                // cotan: accumulate the Voronoi area incrementally
                T tri_area =
                    partial_voronoi_area<T>(p_id, (*q_iter).idx(), r_id, mesh);
                v_weight += (tri_area > 0) ? tri_area : 0;
                q_iter++;
                assert(q_iter == r_iter);
            }
        }

        // Diagonal entry
        if (Arg.use_uniform_laplace) {
            v_weight = 1.0 / v_weight;
        } else {
            v_weight = 0.5 / v_weight;
        }

        assert(!std::isnan(v_weight));
        assert(!std::isinf(v_weight));

        T diag = ((1.0 / v_weight) + sum_e_weight);
        out[p_id][0] = x[0] + diag * in[p_id][0];
        out[p_id][1] = x[1] + diag * in[p_id][1];
        out[p_id][2] = x[2] + diag * in[p_id][2];
    }
}

/**
 * cg()
 * Unpreconditioned conjugate-gradient solve of the implicit MCF system,
 * treating the three coordinates as one vector.  X is the initial guess and
 * the solution; B is the right-hand side; R, P, S are scratch vectors.
 * Outputs the iteration count and the start/stop residuals (<r,r>).
 */
template <typename T>
void cg(TriMesh&                     mesh,
        std::vector<std::vector<T>>& X,
        std::vector<std::vector<T>>& B,
        std::vector<std::vector<T>>& R,
        std::vector<std::vector<T>>& P,
        std::vector<std::vector<T>>& S,
        uint32_t&                    num_cg_iter_taken,
        T&                           start_residual,
        T&                           stop_residual,
        const int                    num_omp_threads)
{
    // CG solver. Solve for the three coordinates simultaneously

    // s = Ax
    mcf_matvec(mesh, X, S, num_omp_threads);

    // r = b - s = b - Ax
    // p = r
#pragma omp parallel for schedule(static) num_threads(num_omp_threads)
    for (int i = 0; i < int(mesh.n_vertices()); ++i) {
        R[i][0] = B[i][0] - S[i][0];
        R[i][1] = B[i][1] - S[i][1];
        R[i][2] = B[i][2] - S[i][2];

        P[i][0] = R[i][0];
        P[i][1] = R[i][1];
        P[i][2] = R[i][2];
    }

    // delta_new = <r,r>
    T delta_new = dot3(R, R, num_omp_threads);

    // delta_0 = delta_new
    const T delta_0(delta_new);

    start_residual = delta_0;

    uint32_t iter = 0;
    while (iter < Arg.max_num_cg_iter) {
        // s = Ap
        mcf_matvec(mesh, P, S, num_omp_threads);

        // alpha = delta_new / <s,p>
        T alpha = dot3(S, P, num_omp_threads);
        alpha = delta_new / alpha;

        // x = x + alpha*p
        axpy3(P, alpha, T(1), X, num_omp_threads);

        // r = r - alpha*s
        axpy3(S, -alpha, T(1), R, num_omp_threads);

        // delta_old = delta_new
        T delta_old(delta_new);

        // delta_new = <r,r>
        delta_new = dot3(R, R, num_omp_threads);

        // beta = delta_new/delta_old
        T beta(delta_new / delta_old);

        // exit if error is getting too low across three coordinates
        if (delta_new < Arg.cg_tolerance * Arg.cg_tolerance * delta_0) {
            break;
        }

        // p = beta*p + r
        axpy3(R, T(1), beta, P, num_omp_threads);

        ++iter;
    }

    num_cg_iter_taken = iter;
    stop_residual     = delta_new;
}

/**
 * implicit_smoothing()
 * One implicit MCF smoothing step: builds the right-hand side from the mesh
 * positions (uniform-Laplace or Voronoi-area weighted), then runs cg().
 * X receives the smoothed coordinates; 'time' is the CG wall time in ms.
 * Requires a closed (boundary-free) mesh.
 */
template <typename T>
void implicit_smoothing(TriMesh&                     mesh,
                        std::vector<std::vector<T>>& X,
                        uint32_t&                    num_cg_iter_taken,
                        float&                       time,
                        T&                           start_residual,
                        T&                           stop_residual,
                        const int                    num_omp_threads)
{
    for (TriMesh::VertexIter v_it = mesh.vertices_begin();
         v_it != mesh.vertices_end();
         ++v_it) {
        ASSERT_FALSE(mesh.is_boundary(*v_it))
            << "OpenMesh MCF only takes watertight/closed mesh without "
               "boundaries";
    }

    // CG containers
    std::vector<std::vector<T>> B(X), R(X), P(X), S(X);

    // NOTE(review): an unsigned loop variable in '#pragma omp parallel for'
    // requires OpenMP 3.0+ -- confirm against the project's minimum compiler.
#pragma omp parallel for
    for (uint32_t v_id = 0; v_id < mesh.n_vertices(); ++v_id) {
        TriMesh::VertexIter v_iter = mesh.vertices_begin() + v_id;

        // LHS: initial guess = current vertex position
        X[v_id][0] = mesh.point(*v_iter)[0];
        X[v_id][1] = mesh.point(*v_iter)[1];
        X[v_id][2] = mesh.point(*v_iter)[2];

        // RHS
        T v_weight = 1;
        if (Arg.use_uniform_laplace) {
            v_weight = static_cast<T>(mesh.valence(*v_iter));
        }
        // will fix it later for cotan weight

        B[v_id][0] = X[v_id][0] * v_weight;
        B[v_id][1] = X[v_id][1] * v_weight;
        B[v_id][2] = X[v_id][2] * v_weight;
    }

    if (!Arg.use_uniform_laplace) {
        // fix RHS (B): replace the placeholder weight with 1/(0.5/area),
        // i.e. divide by the 0.5/voronoi_area vertex weight
#pragma omp parallel for
        for (int v_id = 0; v_id < int(mesh.n_vertices()); ++v_id) {
            TriMesh::VertexIter v_iter = mesh.vertices_begin() + v_id;

            T v_weight(0);

            // q_iter trails vv_iter by one, same walk as in mcf_matvec()
            TriMesh::VertexVertexIter q_iter = mesh.vv_iter(*v_iter);
            --q_iter;
            assert(q_iter.is_valid());

            for (TriMesh::VertexVertexIter vv_iter = mesh.vv_iter(*v_iter);
                 vv_iter.is_valid();
                 ++vv_iter) {
                T tri_area = partial_voronoi_area<T>(
                    v_id, (*q_iter).idx(), (*vv_iter).idx(), mesh);
                v_weight += (tri_area > 0) ? tri_area : 0;
                q_iter++;
                assert(q_iter == vv_iter);
            }
            v_weight = 0.5 / v_weight;

            B[v_id][0] = X[v_id][0] / v_weight;
            B[v_id][1] = X[v_id][1] / v_weight;
            B[v_id][2] = X[v_id][2] / v_weight;
        }
    }

    num_cg_iter_taken = 0;

    // solve
    rxmesh::CPUTimer timer;
    timer.start();

    cg(mesh, X, B, R, P, S, num_cg_iter_taken, start_residual, stop_residual,
       num_omp_threads);

    timer.stop();

    time = timer.elapsed_millis();
}

/**
 * mcf_openmesh()
 * CPU/OpenMesh reference driver: runs one implicit_smoothing() pass on
 * input_mesh, leaves the smoothed coordinates in smoothed_coord, and writes
 * a benchmark report (method, timings, residuals) to Arg.output_folder.
 */
template <typename T>
void mcf_openmesh(const int                    num_omp_threads,
                  TriMesh&                     input_mesh,
                  std::vector<std::vector<T>>& smoothed_coord)
{
    // Report
    OpenMeshReport report("MCF_OpenMesh");
    report.command_line(Arg.argc, Arg.argv);
    report.system();
    report.model_data(Arg.obj_file_name, input_mesh);
    std::string method =
        "OpenMesh " + std::to_string(num_omp_threads) + " Core";
    report.add_member("method", method);
    report.add_member("time_step", Arg.time_step);
    report.add_member("cg_tolerance", Arg.cg_tolerance);
    report.add_member("use_uniform_laplace", Arg.use_uniform_laplace);
    report.add_member("max_num_cg_iter", Arg.max_num_cg_iter);

    // implicit smoothing
    uint32_t num_cg_iter_taken = 0;
    float    time              = 0;
    T        start_residual;
    T        stop_residual;
    implicit_smoothing(input_mesh, smoothed_coord, num_cg_iter_taken, time,
                       start_residual, stop_residual, num_omp_threads);

    RXMESH_TRACE(
        "mcf_openmesh() took {} (ms) and {} iterations (i.e., {} ms/iter) ",
        time, num_cg_iter_taken, time / float(num_cg_iter_taken));

    // write output
    //#pragma omp parallel for
    // for (int v_id = 0; v_id < int(input_mesh.n_vertices()); ++v_id) {
    //    TriMesh::VertexIter v_iter = input_mesh.vertices_begin() + v_id;
    //    input_mesh.point(*v_iter)[0] = smoothed_coord[v_id][0];
    //    input_mesh.point(*v_iter)[1] = smoothed_coord[v_id][1];
    //    input_mesh.point(*v_iter)[2] = smoothed_coord[v_id][2];
    // }
    // std::string fn = STRINGIFY(OUTPUT_DIR) "mcf_openmesh.obj";
    // if (!OpenMesh::IO::write_mesh(input_mesh, fn)) {
    //    RXMESH_WARN("OpenMesh cannot write mesh to file {}", fn);
    // }

    // Finalize report
    report.add_member("start_residual", start_residual);
    report.add_member("end_residual", stop_residual);
    report.add_member("num_cg_iter_taken", num_cg_iter_taken);
    report.add_member("total_time (ms)", time);
    rxmesh::TestData td;
    td.test_name   = "MCF";
    td.num_threads = num_omp_threads;
    td.time_ms.push_back(time / float(num_cg_iter_taken));
    td.passed.push_back(true);
    report.add_test(td);
    report.write(
        Arg.output_folder + "/openmesh",
        "MCF_OpenMesh_" + rxmesh::extract_file_name(Arg.obj_file_name));
}
GB_unaryop__minv_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel; change the generator template, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_int64
// op(A') function:  GB_tran__minv_uint64_int64

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: unsigned 64-bit multiplicative inverse
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting: int64_t -> uint64_t
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64) for all p in
// [0, anz), in parallel.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_unop__minv_uint64_int64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent; static schedule partitions them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
django_fmt_plug.c
/* Django 1.4 patch for JtR. Hacked together during May of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format => user:$django$*type*django-hash
 *
 * Where,
 *
 * type => 1, for Django 1.4 pbkdf_sha256 hashes and
 *
 * django-hash => Second column of "SELECT username, password FROM auth_user"
 *
 * July, 2012, the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much faster
 * function pbkdf2() designed by JimF. Originally this function was designed for
 * the mscash2 (DCC2). The same pbkdf2 function, is used, and simply required small
 * changes to use SHA256.
 *
 * This new code is 3x to 4x FASTER than the original oSSL code. Even though it is
 * only useing oSSL functions. A lot of the high level stuff in oSSL sux for speed.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else

// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact)
//#include <openssl/evp.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               4 // tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "Django"
#define FORMAT_NAME             ""
#define FORMAT_TAG              "$django$*"
#define FORMAT_TAG_LEN          (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       " (x10000)"
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define HASH_LENGTH             44
#define BINARY_SIZE             32
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_ALIGN              sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

// self-test vectors: {ciphertext, plaintext} pairs
static struct fmt_tests django_tests[] = {
	{"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"},
	{"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"},
	{"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

// parsed per-hash parameters; one instance is active at a time (cur_salt)
static struct custom_salt {
	int type;
	int iterations;
	unsigned char salt[32];
} *cur_salt;

// allocate per-candidate key/output buffers, scaled for OpenMP
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

// sanity-check a candidate ciphertext: tag, type==1, algorithm name,
// decimal iteration count, salt length, and base64-valid hash of sane length
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;; /* NOTE(review): stray second ';' — harmless empty statement */
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* type */
		goto err;
	/* type must be 1 */
	if (!isdec(p))
		goto err;
	if (atoi(p) != 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* algorithm */
		goto err;
	if (strcmp(p, "pbkdf2_sha256") != 0)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p)) // FIXME: what about iterations == 0?
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > sizeof(cur_salt->salt)-1)
		goto err;
	if ((p = strtokm(NULL, "")) == NULL)	/* hash */
		goto err;
	if (strlen(p)-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p)-1 > HASH_LENGTH-1) {
		goto err;
	}
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

// parse type/iterations/salt out of the ciphertext
// (returns a pointer to a static buffer — standard JtR get_salt contract)
static void *get_salt(char *ciphertext)
{
	char Buf[120], *ctcopy=Buf;
	char *p, *t;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	strncpy(Buf, ciphertext, 119);
	Buf[119] = 0;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$django$*" */
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	strtokm(NULL, "$");
	t = strtokm(NULL, "$");
	cs.iterations = atoi(t);
	t = strtokm(NULL, "$");
	strcpy((char*)cs.salt, t); /* length already bounded by valid() */

	return (void *)&cs;
}

// decode the trailing base64 hash field into raw binary
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;

	p = strrchr(ciphertext, '$') + 1;
	base64_convert(p, e_b64_mime, strlen(p), (char*)out, e_b64_raw, sizeof(buf.c), flg_Base64_DONOT_NULL_TERMINATE, 0);
	return out;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// run PBKDF2-HMAC-SHA256 over all queued candidates; OpenMP-parallel, and
// SIMD (SSE) multi-lane when SIMD_COEF_32 is defined
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			uint32_t *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = crypt_out[i+index];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0);
#else
		pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),
			cur_salt->salt, strlen((char*)cur_salt->salt),
			cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
	}
	return count;
}

// quick scan: compares only the first machine word of each digest;
// a match is then fully verified by cmp_one()
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

// full BINARY_SIZE comparison for a single candidate
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void django_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

// tunable-cost hook: report the PBKDF2 iteration count
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int)my_salt->iterations;
}

struct fmt_main fmt_django = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		django_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		django_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
omp_thread_create_test_1.c
// execute in parallel // input the number of thread #include <stdlib.h> #include <stdio.h> #include <pthread.h> #include <omp.h> #include <sys/timeb.h> //#include <omp_interop.h> #include <unistd.h> /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } void *test_fun(void *arg){ printf("omp_thread: %d\n", *((int*)arg)); return arg; } void *test_func_2(void *arg) { printf("omp_thread: %d\n", *((int*)arg)); omp_thread_exit(arg); } int num_threads = 100; int iter = 10000; int main(int argc, char * argv[]) { int tid; if (argc >= 2){ omp_set_num_threads(atoi(argv[1])); num_threads = atoi(argv[1]); } // omp_set_nested(); // create 50 threads and put them into threadpool #pragma omp parallel private(tid) num_threads(2) { int tid = omp_get_thread_num(); } int retval; int* ret_value = &retval; omp_thread_t ompthread_0; int arg; arg=0; omp_thread_create(&ompthread_0, test_fun, (void*)(&arg), NULL); omp_thread_join(&ompthread_0, (void**)(&ret_value)); printf("omp_thread 0 return: %d\n", *ret_value); // while(1); omp_thread_t ompthread_1; void * stack = malloc(4098); arg=1; omp_thread_create(&ompthread_1, test_fun, (void*)(&arg), stack); omp_thread_join(&ompthread_1, (void**)(&ret_value)); printf("omp_thread 1 return: %d\n", *ret_value); omp_thread_t ompthread_2; arg=2; omp_thread_create(&ompthread_2, test_fun, (void*)(&arg), stack); omp_thread_join(&ompthread_2, (void**)(&ret_value)); printf("omp_thread 2 return: %d\n", *ret_value); return 0; }
vulkan_basicengine_texture.h
/* * LearnVulkan BasicEngine * * Copyright (C) by engineer1109 - https://github.com/engineer1109/LearnVulkan * * This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT) */ #ifndef VULKAN_BASICENGINE_TEXTURE_H #define VULKAN_BASICENGINE_TEXTURE_H #ifdef WIN32 #define gli glm #endif #include <iostream> #include "VulkanTexture.hpp" #include "stb_image_aug.h" namespace vks { struct Texture2DStbImage: public Texture2D{ uint32_t channels=0; uint32_t size=0; std::vector<std::string> samplerNames{ "No mip maps" , "Mip maps (bilinear)" , "Mip maps (anisotropic)" }; std::vector<VkSampler> samplers; void loadFromFile( std::string filename, VkFormat format, vks::VulkanDevice *device, VkQueue copyQueue, VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT, VkImageLayout imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, bool forceLinear = false) { if (!vks::tools::fileExists(filename)) { vks::tools::exitFatal("Could not load texture from " + filename + "\n\nThe file may be part of the additional asset pack.\n\nRun \"download_assets.py\" in the repository root to download the latest version.", -1); } int w,h,n; unsigned char *img = stbi_load(filename.c_str(), &w, &h, &n, 0); this->device = device; width = static_cast<uint32_t>(w); height = static_cast<uint32_t>(h); mipLevels = static_cast<uint32_t>(1); channels=static_cast<uint32_t>(n); size=width*height*channels; // Get device properites for the requested texture format VkFormatProperties formatProperties; vkGetPhysicalDeviceFormatProperties(device->physicalDevice, format, &formatProperties); // Only use linear tiling if requested (and supported by the device) // Support for linear tiling is mostly limited, so prefer to use // optimal tiling instead // On most implementations linear tiling will only support a very // limited amount of formats and features (mip maps, cubemaps, arrays, etc.) 
VkBool32 useStaging = !forceLinear; VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Use a separate command buffer for texture loading VkCommandBuffer copyCmd = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); if (useStaging) { // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = width*height*channels; // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device->logicalDevice, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device->logicalDevice, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device->logicalDevice, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, img, size); vkUnmapMemory(device->logicalDevice, stagingMemory); // Setup buffer copy regions for each mip level std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (uint32_t i = 0; i < mipLevels; i++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = 
i; bufferCopyRegion.imageSubresource.baseArrayLayer = 0; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = static_cast<uint32_t>(width); bufferCopyRegion.imageExtent.height = static_cast<uint32_t>(height); bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); offset += static_cast<uint32_t>(size); } // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = mipLevels; imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { width, height, 1 }; imageCreateInfo.usage = imageUsageFlags; // Ensure that the TRANSFER_DST bit is set for staging if (!(imageCreateInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { imageCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; } VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &image)); vkGetImageMemoryRequirements(device->logicalDevice, image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, image, deviceMemory, 0)); VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = mipLevels; subresourceRange.layerCount = 1; // Image barrier for optimal image (target) // Optimal image will be used as destination for the copy vks::tools::setImageLayout( 
copyCmd, image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy mip levels from staging buffer vkCmdCopyBufferToImage( copyCmd, stagingBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<uint32_t>(bufferCopyRegions.size()), bufferCopyRegions.data() ); // Change texture image layout to shader read after all mip levels have been copied this->imageLayout = imageLayout; vks::tools::setImageLayout( copyCmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageLayout, subresourceRange); device->flushCommandBuffer(copyCmd, copyQueue); // Clean up staging resources vkFreeMemory(device->logicalDevice, stagingMemory, nullptr); vkDestroyBuffer(device->logicalDevice, stagingBuffer, nullptr); } else { // Prefer using optimal tiling, as linear tiling // may support only a small set of features // depending on implementation (e.g. no mip maps, only one layer, etc.) // Check if this support is supported for linear tiling assert(formatProperties.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT); VkImage mappableImage; VkDeviceMemory mappableMemory; VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.extent = { width, height, 1 }; imageCreateInfo.mipLevels = 1; imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR; imageCreateInfo.usage = imageUsageFlags; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Load mip map level 0 to linear tiling image VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &mappableImage)); // Get memory requirements for this image // like size and alignment vkGetImageMemoryRequirements(device->logicalDevice, mappableImage, &memReqs); // Set memory allocation size to required memory size 
memAllocInfo.allocationSize = memReqs.size; // Get memory type that can be mapped to host memory memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); // Allocate host memory VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &mappableMemory)); // Bind allocated image for use VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, mappableImage, mappableMemory, 0)); // Get sub resource layout // Mip map count, array layer, etc. VkImageSubresource subRes = {}; subRes.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subRes.mipLevel = 0; VkSubresourceLayout subResLayout; void *data; // Get sub resources layout // Includes row pitch, size offsets, etc. vkGetImageSubresourceLayout(device->logicalDevice, mappableImage, &subRes, &subResLayout); // Map image memory VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, mappableMemory, 0, memReqs.size, 0, &data)); // Copy image data into memory memcpy(data, img, size); vkUnmapMemory(device->logicalDevice, mappableMemory); // Linear tiled images don't need to be staged // and can be directly used as textures image = mappableImage; deviceMemory = mappableMemory; this->imageLayout = imageLayout; // Setup image memory barrier vks::tools::setImageLayout(copyCmd, image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, imageLayout); device->flushCommandBuffer(copyCmd, copyQueue); } // Create a defaultsampler VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.magFilter = VK_FILTER_LINEAR; samplerCreateInfo.minFilter = VK_FILTER_LINEAR; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.mipLodBias = 0.0f; 
samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; // Max level-of-detail should match mip level count samplerCreateInfo.maxLod = (useStaging) ? (float)mipLevels : 0.0f; // Only enable anisotropic filtering if enabled on the devicec samplerCreateInfo.maxAnisotropy = device->enabledFeatures.samplerAnisotropy ? device->properties.limits.maxSamplerAnisotropy : 1.0f; samplerCreateInfo.anisotropyEnable = device->enabledFeatures.samplerAnisotropy; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &samplerCreateInfo, nullptr, &sampler)); // Create image view // Textures are not directly accessed by the shaders and // are abstracted by image views containing additional // information and sub resource ranges VkImageViewCreateInfo viewCreateInfo = {}; viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; viewCreateInfo.format = format; viewCreateInfo.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; viewCreateInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; // Linear tiling usually won't support mip maps // Only set mip map count if optimal tiling is used viewCreateInfo.subresourceRange.levelCount = (useStaging) ? 
mipLevels : 1; viewCreateInfo.image = image; VK_CHECK_RESULT(vkCreateImageView(device->logicalDevice, &viewCreateInfo, nullptr, &view)); // Update descriptor image info member that can be used for setting up descriptor sets updateDescriptor(); stbi_image_free(img); } void loadFromFileAutoGenMipmap( std::string filename, VkFormat format, vks::VulkanDevice *device, VkQueue copyQueue, VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT, VkImageLayout imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, bool forceLinear = false) { if (!vks::tools::fileExists(filename)) { vks::tools::exitFatal("Could not load texture from " + filename + "\n\nThe file may be part of the additional asset pack.\n\nRun \"download_assets.py\" in the repository root to download the latest version.", -1); } int w,h,n; unsigned char *img = stbi_load(filename.c_str(), &w, &h, &n, 0); this->device = device; width = static_cast<uint32_t>(w); height = static_cast<uint32_t>(h); channels=static_cast<uint32_t>(n); size=width*height*channels; VkFormatProperties formatProperties; // calculate num of mip maps // numLevels = 1 + floor(log2(max(w, h, d))) // Calculated as log2(max(width, height, depth))c + 1 (see specs) mipLevels = floor(log2(std::max(width, height))) + 1; // Get device properites for the requested texture format vkGetPhysicalDeviceFormatProperties(device->physicalDevice, format, &formatProperties); // Mip-chain generation requires support for blit source and destination assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT); assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_DST_BIT); VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs = {}; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = size; // 
This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device->logicalDevice, &bufferCreateInfo, nullptr, &stagingBuffer)); vkGetBufferMemoryRequirements(device->logicalDevice, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device->logicalDevice, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, img, size); vkUnmapMemory(device->logicalDevice, stagingMemory); // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = mipLevels; imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { width, height, 1 }; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &image)); vkGetImageMemoryRequirements(device->logicalDevice, image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); 
VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, image, deviceMemory, 0)); VkCommandBuffer copyCmd = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.levelCount = 1; subresourceRange.layerCount = 1; // Optimal image will be used as destination for the copy, so we must transfer from our initial undefined image layout to the transfer destination layout vks::tools::setImageLayout( copyCmd, image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the first mip of the chain, remaining mips will be generated VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = 0; bufferCopyRegion.imageSubresource.baseArrayLayer = 0; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = width; bufferCopyRegion.imageExtent.height = height; bufferCopyRegion.imageExtent.depth = 1; vkCmdCopyBufferToImage(copyCmd, stagingBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bufferCopyRegion); // Transition first mip level to transfer source for read during blit imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vks::tools::setImageLayout( copyCmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, subresourceRange); device->flushCommandBuffer(copyCmd, copyQueue, true); // Clean up staging resources vkFreeMemory(device->logicalDevice, stagingMemory, nullptr); vkDestroyBuffer(device->logicalDevice, stagingBuffer, nullptr); // Generate the mip chain // --------------------------------------------------------------- // We copy down the whole mip chain doing a blit from mip-1 to mip // An alternative way would be to always blit from the 
first mip level and sample that one down VkCommandBuffer blitCmd = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Copy down mips from n-1 to n for (int32_t i = 1; i < mipLevels; i++) { VkImageBlit imageBlit{}; // Source imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imageBlit.srcSubresource.layerCount = 1; imageBlit.srcSubresource.mipLevel = i-1; imageBlit.srcOffsets[1].x = int32_t(width >> (i - 1)); imageBlit.srcOffsets[1].y = int32_t(height >> (i - 1)); imageBlit.srcOffsets[1].z = 1; // Destination imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imageBlit.dstSubresource.layerCount = 1; imageBlit.dstSubresource.mipLevel = i; imageBlit.dstOffsets[1].x = int32_t(width >> i); imageBlit.dstOffsets[1].y = int32_t(height >> i); imageBlit.dstOffsets[1].z = 1; VkImageSubresourceRange mipSubRange = {}; mipSubRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; mipSubRange.baseMipLevel = i; mipSubRange.levelCount = 1; mipSubRange.layerCount = 1; // Transiton current mip level to transfer dest vks::tools::setImageLayout( blitCmd, image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, mipSubRange, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT); // Blit from previous level vkCmdBlitImage( blitCmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlit, VK_FILTER_LINEAR); // Transiton current mip level to transfer source for read in next iteration vks::tools::setImageLayout( blitCmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, mipSubRange, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); } // After the loop, all mip layers are in TRANSFER_SRC layout, so transition all to SHADER_READ subresourceRange.levelCount = mipLevels; vks::tools::setImageLayout( blitCmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageLayout, subresourceRange); device->flushCommandBuffer(blitCmd, copyQueue, true); // 
--------------------------------------------------------------- // Create samplers samplers.resize(3); VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT; sampler.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT; sampler.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT; sampler.mipLodBias = 0.0f; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = 0.0f; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler.maxAnisotropy = 1.0; sampler.anisotropyEnable = VK_FALSE; // Without mip mapping VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &sampler, nullptr, &samplers[0])); // With mip mapping sampler.maxLod = (float)mipLevels; VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &sampler, nullptr, &samplers[1])); // With mip mapping and anisotropic filtering if (device->features.samplerAnisotropy) { sampler.maxAnisotropy = device->properties.limits.maxSamplerAnisotropy; sampler.anisotropyEnable = VK_TRUE; } VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &sampler, nullptr, &samplers[2])); // Create image view VkImageViewCreateInfo viewCreateInfo = vks::initializers::imageViewCreateInfo(); viewCreateInfo.image = image; viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; viewCreateInfo.format = format; viewCreateInfo.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; viewCreateInfo.subresourceRange.baseMipLevel = 0; viewCreateInfo.subresourceRange.baseArrayLayer = 0; viewCreateInfo.subresourceRange.layerCount = 1; viewCreateInfo.subresourceRange.levelCount = mipLevels; VK_CHECK_RESULT(vkCreateImageView(device->logicalDevice, &viewCreateInfo, nullptr, &view)); } void 
destroy() { vkDestroyImageView(device->logicalDevice, view, nullptr); vkDestroyImage(device->logicalDevice, image, nullptr); if(sampler){ vkDestroySampler(device->logicalDevice, sampler, nullptr); } for (auto samplers_member : samplers) { vkDestroySampler(device->logicalDevice, samplers_member, nullptr); } vkFreeMemory(device->logicalDevice, deviceMemory, nullptr); } }; class TextureCubeMapStbImage:public TextureCubeMap{ public: uint32_t channels=0; uint32_t size=0; void loadFromFile( std::vector<std::string> filenameList, VkFormat format, vks::VulkanDevice *device, VkQueue copyQueue, VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT, VkImageLayout imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { for(size_t i=0;i<filenameList.size();i++){ std::string filename=filenameList[i]; if (!vks::tools::fileExists(filename)) { vks::tools::exitFatal("Could not load texture from " + filename + "\n\nThe file may be part of the additional asset pack.\n\nRun \"download_assets.py\" in the repository root to download the latest version.", -1); } } int w,h,n; std::vector<uint8_t*> imgList(filenameList.size()); for (size_t i=0;i<imgList.size();i++) { imgList[i]=stbi_load(filenameList[i].c_str(), &w, &h, &n, 0); } this->device = device; width = static_cast<uint32_t>(w); height = static_cast<uint32_t>(h); mipLevels = static_cast<uint32_t>(1); channels=static_cast<uint32_t>(n); size=width*height*4*filenameList.size(); uint32_t imgSize=width*height*4; uint8_t* img=new uint8_t[size]; uint8_t* imgT=new uint8_t[size]; if(n==4){ for (size_t i=0;i<imgList.size();i++) { memcpy(img+i*width*height*channels,imgList[i],width*height*channels*sizeof(uint8_t)); stbi_image_free(imgList[i]); } } // Turn RGB to RGBA else if(n==3){ #pragma omp parallel for for (int i=0;i<imgList.size();i++) { for(uint32_t m=0;m<height;m++){ for (uint32_t n=0;n<width;n++) { img[imgSize*i+(n+width*m)*4+0]=imgList[i][(n+width*m)*3+0]; img[imgSize*i+(n+width*m)*4+1]=imgList[i][(n+width*m)*3+1]; 
img[imgSize*i+(n+width*m)*4+2]=imgList[i][(n+width*m)*3+2]; img[imgSize*i+(n+width*m)*4+3]=255; } } stbi_image_free(imgList[i]); } } #pragma omp parallel for for (int i=0;i<imgList.size();i++) { if(i==2){ for(uint32_t m=0;m<height;m++){ for (uint32_t n=0;n<width;n++) { imgT[imgSize*i+(n+width*m)*4+0]=img[imgSize*i+(m+width*(n))*4+0]; imgT[imgSize*i+(n+width*m)*4+1]=img[imgSize*i+(m+width*(n))*4+1]; imgT[imgSize*i+(n+width*m)*4+2]=img[imgSize*i+(m+width*(n))*4+2]; imgT[imgSize*i+(n+width*m)*4+3]=img[imgSize*i+(m+width*(n))*4+3]; } } } else if (i==3){ for(uint32_t m=0;m<height;m++){ for (uint32_t n=0;n<width;n++) { imgT[imgSize*i+(n+width*m)*4+0]=img[imgSize*i+((height-1-m)+width*(width-1-n))*4+0]; imgT[imgSize*i+(n+width*m)*4+1]=img[imgSize*i+((height-1-m)+width*(width-1-n))*4+1]; imgT[imgSize*i+(n+width*m)*4+2]=img[imgSize*i+((height-1-m)+width*(width-1-n))*4+2]; imgT[imgSize*i+(n+width*m)*4+3]=img[imgSize*i+((height-1-m)+width*(width-1-n))*4+3]; } } } else { for(uint32_t m=0;m<height;m++){ for (uint32_t n=0;n<width;n++) { imgT[imgSize*i+(n+width*m)*4+0]=img[imgSize*i+(n+width*(height-1-m))*4+0]; imgT[imgSize*i+(n+width*m)*4+1]=img[imgSize*i+(n+width*(height-1-m))*4+1]; imgT[imgSize*i+(n+width*m)*4+2]=img[imgSize*i+(n+width*(height-1-m))*4+2]; imgT[imgSize*i+(n+width*m)*4+3]=img[imgSize*i+(n+width*(height-1-m))*4+3]; } } } } VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = size; // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device->logicalDevice, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for 
the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device->logicalDevice, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device->logicalDevice, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, imgT, size); vkUnmapMemory(device->logicalDevice, stagingMemory); // Setup buffer copy regions for each face including all of it's miplevels std::vector<VkBufferImageCopy> bufferCopyRegions; size_t offset = 0; for (uint32_t face = 0; face < 6; face++) { for (uint32_t level = 0; level < mipLevels; level++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = level; bufferCopyRegion.imageSubresource.baseArrayLayer = face; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = static_cast<uint32_t>(w); bufferCopyRegion.imageExtent.height = static_cast<uint32_t>(h); bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); // Increase offset into staging buffer for next level / face offset += width*height*4; } } // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = mipLevels; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; 
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { width, height, 1 }; imageCreateInfo.usage = imageUsageFlags; // Ensure that the TRANSFER_DST bit is set for staging if (!(imageCreateInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { imageCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; } // Cube faces count as array layers in Vulkan imageCreateInfo.arrayLayers = 6; // This flag is required for cube map images imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &image)); vkGetImageMemoryRequirements(device->logicalDevice, image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, image, deviceMemory, 0)); // Use a separate command buffer for texture loading VkCommandBuffer copyCmd = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Image barrier for optimal image (target) // Set initial layout for all array layers (faces) of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = mipLevels; subresourceRange.layerCount = 6; vks::tools::setImageLayout( copyCmd, image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the cube map faces from the staging buffer to the optimal tiled image vkCmdCopyBufferToImage( copyCmd, stagingBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<uint32_t>(bufferCopyRegions.size()), bufferCopyRegions.data()); // Change texture image layout to shader read after all faces 
have been copied this->imageLayout = imageLayout; vks::tools::setImageLayout( copyCmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageLayout, subresourceRange); device->flushCommandBuffer(copyCmd, copyQueue); // Create sampler VkSamplerCreateInfo samplerCreateInfo = vks::initializers::samplerCreateInfo(); samplerCreateInfo.magFilter = VK_FILTER_LINEAR; samplerCreateInfo.minFilter = VK_FILTER_LINEAR; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = samplerCreateInfo.addressModeU; samplerCreateInfo.addressModeW = samplerCreateInfo.addressModeU; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.maxAnisotropy = device->enabledFeatures.samplerAnisotropy ? device->properties.limits.maxSamplerAnisotropy : 1.0f; samplerCreateInfo.anisotropyEnable = device->enabledFeatures.samplerAnisotropy; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = (float)mipLevels; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &samplerCreateInfo, nullptr, &sampler)); // Create image view VkImageViewCreateInfo viewCreateInfo = vks::initializers::imageViewCreateInfo(); viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_CUBE; viewCreateInfo.format = format; viewCreateInfo.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; viewCreateInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; viewCreateInfo.subresourceRange.layerCount = 6; viewCreateInfo.subresourceRange.levelCount = mipLevels; viewCreateInfo.image = image; VK_CHECK_RESULT(vkCreateImageView(device->logicalDevice, &viewCreateInfo, nullptr, &view)); // Clean up staging resources vkFreeMemory(device->logicalDevice, stagingMemory, nullptr); vkDestroyBuffer(device->logicalDevice, stagingBuffer, nullptr); // Update 
descriptor image info member that can be used for setting up descriptor sets updateDescriptor(); delete[] img; delete[] imgT; } }; } #endif // VULKAN_BASICENGINE_TEXTURE_H
convolution_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 2x2 convolution, stride 1, for ncnn Mat blobs.
// - One output channel per OpenMP task; each output channel is seeded with its
//   bias via out.fill() and then accumulated into (every inner path uses +=).
// - Input channels are consumed two at a time (q, q+1) with a single-channel
//   tail loop, so each pass reads two input rows per channel (r*0 = row i,
//   r*1 = row i+1).
// - Kernel layout: 4 floats (2x2) per (outch, inch) pair, indexed as
//   kernel + p*inch*4 + q*4.
// - On ARM the 4-wide inner loop is hand-written inline asm (separate
//   aarch64 / armv7 bodies); the scalar remainder and the non-NEON build use
//   plain C. NOTE(review): asm operand numbers (%8..%13) count the output
//   operands first; do not renumber constraints without updating the asm text.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // seed the whole output channel with the bias; everything below accumulates
        out.fill(bias0);

        int q = 0;

        // main loop: two input channels per iteration
        for (; q+1<inch; q+=2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);

            const float* kernel0 = kernel + p*inch*4 + q*4;
            const float* kernel1 = kernel0 + 4;

            // two consecutive input rows per channel (2-tap vertical extent)
            const float* r00 = img0;
            const float* r01 = img0 + w;
            const float* r10 = img1;
            const float* r11 = img1 + w;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0); // k00 k01 k02 k03 for channel q
            float32x4_t _k1 = vld1q_f32(kernel1); // same for channel q+1
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // vectorized groups of 4 outputs
                int remain = outw & 3;  // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // v0/v1: row0 of ch q; v2/v3: row1 of ch q; v12..v15: rows of ch q+1.
                // ext builds the x+1 shifted vector; sums for the two channels are
                // kept in v8/v9 and added once per group before the store.
                asm volatile(
                    "prfm       pldl1keep, [%1, #128]  \n"
                    "ld1        {v0.4s}, [%1], #16     \n"
                    "prfm       pldl1keep, [%2, #128]  \n"
                    "ld1        {v2.4s}, [%2], #16     \n"
                    "prfm       pldl1keep, [%3, #128]  \n"
                    "ld1        {v12.4s}, [%3], #16    \n"
                    "prfm       pldl1keep, [%4, #128]  \n"
                    "ld1        {v14.4s}, [%4], #16    \n"
                    "0:                                \n"
                    "prfm       pldl1keep, [%5, #128]  \n"
                    "ld1        {v9.4s}, [%5]          \n"
                    "fmul       v8.4s, v0.4s, %12.s[0] \n"
                    "fmla       v9.4s, v2.4s, %12.s[2] \n"
                    "prfm       pldl1keep, [%1, #128]  \n"
                    "ld1        {v1.4s}, [%1], #16     \n"
                    "prfm       pldl1keep, [%2, #128]  \n"
                    "ld1        {v3.4s}, [%2], #16     \n"
                    "ext        v10.16b, v0.16b, v1.16b, #4 \n"
                    "ext        v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla       v8.4s, v12.4s, %13.s[0] \n"
                    "fmla       v9.4s, v14.4s, %13.s[2] \n"
                    "prfm       pldl1keep, [%3, #128]  \n"
                    "ld1        {v13.4s}, [%3], #16    \n"
                    "prfm       pldl1keep, [%4, #128]  \n"
                    "ld1        {v15.4s}, [%4], #16    \n"
                    "fmla       v8.4s, v10.4s, %12.s[1] \n"
                    "fmla       v9.4s, v11.4s, %12.s[3] \n"
                    "ext        v10.16b, v12.16b, v13.16b, #4 \n"
                    "ext        v11.16b, v14.16b, v15.16b, #4 \n"
                    "fmla       v8.4s, v10.4s, %13.s[1] \n"
                    "fmla       v9.4s, v11.4s, %13.s[3] \n"
                    "orr        v0.16b, v1.16b, v1.16b \n"
                    "orr        v2.16b, v3.16b, v3.16b \n"
                    "fadd       v8.4s, v8.4s, v9.4s    \n"
                    "orr        v12.16b, v13.16b, v13.16b \n"
                    "orr        v14.16b, v15.16b, v15.16b \n"
                    "subs       %w0, %w0, #1           \n"
                    "st1        {v8.4s}, [%5], #16     \n"
                    "bne        0b                     \n"
                    // the loop pre-loads one vector ahead; step back 16 bytes
                    "sub        %1, %1, #16            \n"
                    "sub        %2, %2, #16            \n"
                    "sub        %3, %3, #16            \n"
                    "sub        %4, %4, #16            \n"
                    : "=r"(nn),     // %0
                      "=r"(r00),    // %1
                      "=r"(r01),    // %2
                      "=r"(r10),    // %3
                      "=r"(r11),    // %4
                      "=r"(outptr)  // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0),     // %12
                      "w"(_k1)      // %13
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // armv7 mirror of the aarch64 loop above; q-register layout matches
                // the v-register layout (q0/q1 row0 ch q, q2/q3 row1 ch q, q12..q15 ch q+1).
                asm volatile(
                    "pld        [%1, #128]             \n"
                    "vld1.f32   {d0-d1}, [%1]!         \n"
                    "pld        [%2, #128]             \n"
                    "vld1.f32   {d4-d5}, [%2]!         \n"
                    "pld        [%3, #128]             \n"
                    "vld1.f32   {d24-d25}, [%3]!       \n"
                    "pld        [%4, #128]             \n"
                    "vld1.f32   {d28-d29}, [%4]!       \n"
                    "0:                                \n"
                    "pld        [%5, #128]             \n"
                    "vld1.f32   {d18-d19}, [%5]        \n"// q9 = sum
                    "vmul.f32   q8, q0, %e12[0]        \n"
                    "vmla.f32   q9, q2, %f12[0]        \n"
                    "pld        [%1, #128]             \n"
                    "vld1.f32   {d2-d3}, [%1]!         \n"
                    "pld        [%2, #128]             \n"
                    "vld1.f32   {d6-d7}, [%2]!         \n"
                    "vext.f32   q10, q0, q1, #1        \n"
                    "vext.f32   q11, q2, q3, #1        \n"
                    "vmla.f32   q8, q12, %e13[0]       \n"
                    "vmla.f32   q9, q14, %f13[0]       \n"
                    "pld        [%3, #128]             \n"
                    "vld1.f32   {d26-d27}, [%3]!       \n"
                    "pld        [%4, #128]             \n"
                    "vld1.f32   {d30-d31}, [%4]!       \n"
                    "vmla.f32   q8, q10, %e12[1]       \n"
                    "vmla.f32   q9, q11, %f12[1]       \n"
                    "vext.f32   q10, q12, q13, #1      \n"
                    "vext.f32   q11, q14, q15, #1      \n"
                    "vmla.f32   q8, q10, %e13[1]       \n"
                    "vmla.f32   q9, q11, %f13[1]       \n"
                    "vorr       q0, q1, q1             \n"
                    "vorr       q2, q3, q3             \n"
                    "vadd.f32   q8, q8, q9             \n"
                    "vorr       q12, q13, q13          \n"
                    "vorr       q14, q15, q15          \n"
                    "subs       %0, #1                 \n"
                    "vst1.f32   {d16-d17}, [%5]!       \n"
                    "bne        0b                     \n"
                    // undo the one-vector read-ahead
                    "sub        %1, #16                \n"
                    "sub        %2, #16                \n"
                    "sub        %3, #16                \n"
                    "sub        %4, #16                \n"
                    : "=r"(nn),     // %0
                      "=r"(r00),    // %1
                      "=r"(r01),    // %2
                      "=r"(r10),    // %3
                      "=r"(r11),    // %4
                      "=r"(outptr)  // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0),     // %12
                      "w"(_k1)      // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output pixel at a time, both input channels
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // pack the 2x2 window of each channel into one float32x4
                    // and dot it against the packed kernel lanes
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);

                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);

                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);

                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);

                    // horizontal add of the 4 partial products
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];

                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];

                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // advance past the last input column of the row
                // (assumes w = outw + 1 for a 2x2/stride-1 kernel — geometry implied by the +1)
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }

        // tail loop: remaining single input channel (odd inch)
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*4 + q*4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if __ARM_NEON
            // broadcast each of the four kernel taps into its own vector
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // single-channel variant: v0/v1 row0, v2/v3 row1; here each
                // kernel tap is a full broadcast vector (%8..%11)
                asm volatile(
                    "prfm       pldl1keep, [%1, #128]  \n"
                    "ld1        {v0.4s}, [%1], #16     \n"
                    "prfm       pldl1keep, [%2, #128]  \n"
                    "ld1        {v2.4s}, [%2], #16     \n"
                    "0:                                \n"
                    "prfm       pldl1keep, [%3, #128]  \n"
                    "ld1        {v9.4s}, [%3]          \n"
                    "fmul       v8.4s, v0.4s, %8.4s    \n"
                    "fmla       v9.4s, v2.4s, %10.4s   \n"
                    "prfm       pldl1keep, [%1, #128]  \n"
                    "ld1        {v1.4s}, [%1], #16     \n"
                    "ext        v10.16b, v0.16b, v1.16b, #4 \n"
                    "fmla       v8.4s, v10.4s, %9.4s   \n"
                    "prfm       pldl1keep, [%2, #128]  \n"
                    "ld1        {v3.4s}, [%2], #16     \n"
                    "ext        v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla       v9.4s, v11.4s, %11.4s  \n"
                    "orr        v0.16b, v1.16b, v1.16b \n"
                    "fadd       v8.4s, v8.4s, v9.4s    \n"
                    "orr        v2.16b, v3.16b, v3.16b \n"
                    "subs       %w0, %w0, #1           \n"
                    "st1        {v8.4s}, [%3], #16     \n"
                    "bne        0b                     \n"
                    "sub        %1, %1, #16            \n"
                    "sub        %2, %2, #16            \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0),     // %8
                      "w"(_k1),     // %9
                      "w"(_k2),     // %10
                      "w"(_k3)      // %11
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%1, #128]             \n"
                    "vld1.f32   {d0-d1}, [%1]!         \n"
                    "pld        [%2, #128]             \n"
                    "vld1.f32   {d4-d5}, [%2]!         \n"
                    "0:                                \n"
                    "pld        [%3, #128]             \n"
                    "vld1.f32   {d18-d19}, [%3]        \n"// q9 = sum
                    "vmul.f32   q8, q0, %q8            \n"
                    "vmla.f32   q9, q2, %q10           \n"
                    "pld        [%1, #128]             \n"
                    "vld1.f32   {d2-d3}, [%1]!         \n"
                    "vext.f32   q10, q0, q1, #1        \n"
                    "vmla.f32   q8, q10, %q9           \n"
                    "pld        [%2, #128]             \n"
                    "vld1.f32   {d6-d7}, [%2]!         \n"
                    "vext.f32   q11, q2, q3, #1        \n"
                    "vmla.f32   q9, q11, %q11          \n"
                    "vorr       q0, q1, q1             \n"
                    "vadd.f32   q8, q8, q9             \n"
                    "vorr       q2, q3, q3             \n"
                    "subs       %0, #1                 \n"
                    "vst1.f32   {d16-d17}, [%3]!       \n"
                    "bne        0b                     \n"
                    "sub        %1, #16                \n"
                    "sub        %2, #16                \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0),     // %8
                      "w"(_k1),     // %9
                      "w"(_k2),     // %10
                      "w"(_k3)      // %11
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // reload the 4 taps as one packed vector for the scalar tail
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);

                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);

                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];

                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                // skip the final input column of the row (see note above)
                r0 += 1;
                r1 += 1;
            }
        }
    }
}
GB_unop__ainv_int64_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ainv_int64_int64
// op(A') function:  GB_unop_tran__ainv_int64_int64

// C type:   int64_t
// A type:   int64_t
// cast:     int64_t cij = aij
// unaryop:  cij = -aij  (additive inverse, AINV)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting (int64 -> int64 is the identity cast here)
#define GB_CAST(z, aij) \
    int64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = aij ;       \
    Cx [pC] = -z ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Negates each of the anz entries of Ax into Cx (element-wise, dense arrays).
// Cx and Ax may alias each other; each entry is read once and written once,
// so aliasing is safe under the static OpenMP schedule.
GrB_Info GB_unop_apply__ainv_int64_int64
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        int64_t z = aij ;
        Cx [p] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel lives in GB_unop_transpose.c, which is textually
// included and specialized by the GB_* macros defined above.
GrB_Info GB_unop_tran__ainv_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
path.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : path.c * Description : path drawing * * + This is part of libaroma, an embedded ui toolkit. * + 06/04/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_path_c__ #define __libaroma_path_c__ #include <aroma_internal.h> #ifdef __cplusplus extern "C" { #endif /* * Function : libaroma_path * Return Value: LIBAROMA_PATHP * Descriptions: create new path */ LIBAROMA_PATHP libaroma_path(float x, float y){ LIBAROMA_PATHP path = (LIBAROMA_PATHP) calloc(sizeof(LIBAROMA_PATH),1); if (!path){ ALOGW("libaroma_path alloc LIBAROMA_PATHP failed"); return NULL; } path->p=(LIBAROMA_PATH_POINTP) malloc(sizeof(LIBAROMA_PATH_POINT)*32); if (!path->p){ free(path); ALOGW("libaroma_path alloc path->p failed"); return NULL; } path->p[0].x=x; path->p[0].y=y; path->max.x=path->min.x=x; path->max.y=path->min.y=y; path->n=1; return path; } /* End of libaroma_path */ /* * Function : libaroma_path_free * Return Value: byte * Descriptions: free path */ byte libaroma_path_free(LIBAROMA_PATHP path){ if (!path){ return 0; } if (path->p){ free(path->p); } free(path); return 1; } /* End of libaroma_path_free */ /* * Function : libaroma_path_add * Return Value: byte * Descriptions: add point into path */ byte 
libaroma_path_add(LIBAROMA_PATHP path, float x, float y){ if (!path){ return 0; } if (!path->p){ return 0; } if (path->n%32==0){ LIBAROMA_PATH_POINTP newp = (LIBAROMA_PATH_POINTP) realloc( path->p,sizeof(LIBAROMA_PATH_POINT)*(path->n+32) ); if (!newp){ ALOGW("libaroma_path_add cannot realloc path->p"); return 0; } path->p = newp; } path->p[path->n].x=x; path->p[path->n].y=y; path->max.x=MAX(path->max.x,x); path->max.y=MAX(path->max.y,y); path->min.x=MIN(path->min.x,x); path->min.y=MIN(path->min.y,y); path->n++; return 1; } /* End of libaroma_path_add */ /* * Function : libaroma_path_curve_calc * Return Value: void * Descriptions: calculating bezier curve */ void libaroma_path_curve_calc( float t, float *x, float *y, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3){ float u = 1-t; float tt = t*t; float uu = u*u; float uuu = uu * u; float ttt = tt * t; /* calculating */ *x = uuu * x0; *x += 3 * uu * t * x1; *x += 3 * u * tt * x2; *x += ttt * x3; *y = uuu * y0; *y += 3 * uu * t * y1; *y += 3 * u * tt * y2; *y += ttt * y3; } /* End of libaroma_path_curve_calc */ /* * Function : _libaroma_path_curve_findpoint * Return Value: byte * Descriptions: find curve path points */ byte _libaroma_path_curve_findpoint( LIBAROMA_PATHP path, float t0, float t1, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float xt0, float yt0, float xt1, float yt1 ){ if (t0==t1){ return 0; } float thalf = t0 + ((t1 - t0) / 2); float xt, yt; libaroma_path_curve_calc(thalf, &xt, &yt,x0,y0,x1,y1,x2,y2,x3,y3); if ((abs(xt-xt0)>=2)||(abs(yt-yt0)>=2)) { _libaroma_path_curve_findpoint( path,t0,thalf,x0,y0,x1,y1,x2,y2,x3,y3,xt0,yt0,xt,yt); } libaroma_path_add(path, xt, yt); if ((abs(xt-xt1)>=2)||(abs(yt-yt1)>=2)) { _libaroma_path_curve_findpoint( path,thalf,t1,x0,y0,x1,y1,x2,y2,x3,y3,xt,yt,xt1,yt1); } libaroma_path_add(path, xt1, yt1); return 1; } /* End of _libaroma_path_curve_findpoint */ /* * Function : libaroma_path_curve * Return Value: 
byte * Descriptions: add curve point */ byte libaroma_path_curve( LIBAROMA_PATHP path, int resolution, float x1, float y1, float x2, float y2, float x3, float y3 ){ if (!path){ return 0; } if (!path->p){ return 0; } if (resolution<1){ /* dynamic hi res curve calculation */ float x0 = path->p[path->n-1].x; float y0 = path->p[path->n-1].y; _libaroma_path_curve_findpoint( path,0,1,x0,y0,x1,y1,x2,y2,x3,y3,x0,y0,x3,y3); } else{ /* fixed resolution */ int i; float x0 = path->p[path->n-1].x; float y0 = path->p[path->n-1].y; int px = round(x0); int py = round(y0); for(i=0;i<resolution;i++){ float x, y; float t = i / ((float) (resolution-1)); libaroma_path_curve_calc(t,&x,&y,x0,y0,x1,y1,x2,y2,x3,y3); int rx = round(x); int ry = round(y); if ((px!=rx)||(py!=ry)){ libaroma_path_add(path, x, y); } } } return 1; } /* End of libaroma_path_curve */ /* * Function : libaroma_path_draw_filled * Return Value: byte * Descriptions: draw canvas-filled path */ byte libaroma_path_draw_filled( LIBAROMA_CANVASP dest, LIBAROMA_CANVASP src, LIBAROMA_PATHP path, word color, byte alpha, byte is_mask, float aliasing){ if (!dest){ dest=libaroma_fb()->canvas; } if ((is_mask)&&(dest->alpha==NULL)){ return 0; } if (!path){ return 0; } if ((!is_mask)&&(alpha<1)){ return 1; } if (aliasing<=0){ aliasing=1; } if (aliasing>1){ aliasing=1; } //scaled canvas pointer LIBAROMA_CANVASP cv=NULL; /* fill */ if (path->n>1){ int miny = MAX(0,floor(path->min.y)); int maxy = MIN(dest->h-1,ceil(path->max.y)); int minx = MAX(0,floor(path->min.x)); int dwidth = MIN(dest->w,ceil(path->max.x))-minx; if (dwidth<1){ return 1; } float alias_sz = 1/aliasing; byte alphaaa=alpha*aliasing; if (is_mask==2){ alphaaa=255*aliasing; } int py=0; if (src!=NULL){ cv = libaroma_canvas_ex(src->w, maxy-miny, (src->alpha==NULL)?0:1); libaroma_draw_scale_nearest(cv, src, 0, 0, src->w, cv->h, 0, 0, src->w, src->h); } /* loop through the rows of the image. 
*/ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (py=miny;py<=maxy;py++) { bytep line=NULL; if (is_mask){ line = dest->alpha + py * dest->l + minx; } else{ line = calloc(dwidth,1); } float * nodes = (float *) malloc(sizeof(float) * path->n); int pyn, drawy=0; for (pyn=0;pyn<alias_sz;pyn++){ float fy = ((float) py)+(((float) pyn)*aliasing); int i, n=0, j=path->n-1; /* find nodes */ for (i=0;i<path->n;i++){ if ( ((path->p[i].y<fy)&&(path->p[j].y>=fy))|| ((path->p[j].y<fy)&&(path->p[i].y>=fy)) ){ nodes[n++] = ( (path->p[i].x+(fy-path->p[i].y)/(path->p[j].y-path->p[i].y)* (path->p[j].x-path->p[i].x))) - ((float) minx); } j = i; } /* there is nodes */ if (n>1){ i=0; while (i<n-1){ if (nodes[i]>nodes[i+1]){ float tmp=nodes[i]; nodes[i]=nodes[i+1]; nodes[i+1]=tmp; if (i>0){ i--; } } else{ i++; } } /* process alpha values */ for (i=0;i<n;i+=2){ if (nodes[i]>=dwidth){ break; } if (nodes[i+1]>0){ if (nodes[i]<0){ nodes[i]=0; } if (nodes[i+1]>dwidth){ nodes[i+1]=dwidth; } } else{ continue; } if (nodes[i+1]-nodes[i]<1){ continue; } if (aliasing==1){ int linex=(int) floor(nodes[i]); int linew=((int) floor(nodes[i+1]))-linex; memset(line+linex,alpha,linew); if (src!=NULL){ //scale line accordingly libaroma_draw_scale_nearest(dest, cv, minx+linex, py, linew, 1, 0, py-miny, cv->w, 1); } } else{ int px; /* left & right aliasing */ int linex=floor(nodes[i]); int linerx=floor(nodes[i+1]); if (is_mask!=2){ line[linex]= MIN(255,line[linex]+(1.0-fmod(nodes[i],1))*alphaaa); line[linerx]= MIN(255,line[linerx]+fmod(nodes[i+1],1)*alphaaa); } else{ line[linex]= MAX(0,((int) line[linex])-(1.0-fmod(nodes[i],1))*alphaaa); line[linerx]= MAX(0,((int) line[linerx])-fmod(nodes[i+1],1)*alphaaa); } linex++; int linew=linerx-linex; if (linew<1){ continue; } bytep cline=line+linex; int left=linew; #ifdef __ARM_NEON__ left=linew%8; if (linew>=8){ uint8x8_t ro = vmov_n_u8(alphaaa); if (is_mask!=2){ uint16x8_t v255 = vdupq_n_u16(alpha); for (px=0;px<linew-left;px+=8) { uint8x8_t op = 
vld1_u8(cline+px); vst1_u8(cline+px, vmovn_u16(vminq_u16(vaddl_u8(op, ro),v255))); } } else{ uint8x8_t v0 = vmov_n_u8(0); for (px=0;px<linew-left;px+=8) { uint8x8_t op = vld1_u8(cline+px); vst1_u8(cline+px, vmax_u8(vsub_u8(op,ro),v0)); } } } #endif if (is_mask!=2){ for (px=linew-left;px<linew;px++){ cline[px]=MIN(alpha,cline[px]+alphaaa); } } else{ for (px=linew-left;px<linew;px++){ cline[px]=MAX(0,((int) cline[px])-alphaaa); } } } } } } free(nodes); if (!is_mask){ /* process */ if (line!=NULL){ if (src==NULL){ wordp color_line = dest->data + py * dest->l + minx; libaroma_alpha_mono(dwidth,color_line,color_line,color,line); } free(line); } } } if (cv!=NULL) libaroma_canvas_free(cv); } return 1; } /* End of libaroma_path_draw_filled */ #ifdef __cplusplus } #endif #endif /* __libaroma_path_c__ */
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__abs_fp32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp32_fp64
// op(A') function:  GB_tran__abs_fp32_fp64

// C type:   float
// A type:   double
// cast:     float cij = (float) aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting (double -> float, then fabsf of the narrowed value)
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;    \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = fabsf ((float) Ax [p]) for all anz entries (dense arrays).
GrB_Info GB_unop__abs_fp32_fp64
(
    float *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body comes from GB_unaryop_transpose.c, textually
// included and specialized by the GB_* macros above.
GrB_Info GB_tran__abs_fp32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gemm.c
#include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#include <time.h>   /* BUGFIX: clock()/clock_t/CLOCKS_PER_SEC are used below */
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif

#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop

#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif

// GEMM with a binarized (char 0/1) A matrix:
// C[i][j] += B[k][j] where A[i][k] is nonzero, C[i][j] -= B[k][j] otherwise.
// ALPHA is accepted for signature compatibility but unused.
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda+k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] += B[k*ldb+j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] -= B[k*ldb+j];
                }
            }
        }
    }
}

// Allocate a rows*cols matrix of uniform random floats in [0,1].
// NOTE(review): calloc result is not checked; callers assume success.
float *random_matrix(int rows, int cols)
{
    int i;
    float* m = (float*)calloc(rows * cols, sizeof(float));
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

// Benchmark helper: time 10 gemm_cpu calls on random matrices of the given
// shape/transpose flags and print the elapsed CPU time.
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* BUGFIX: the value is in seconds ((end-start)/CLOCKS_PER_SEC) but was
     * labelled "ms"; label now matches the unit actually printed. */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",
        m,k,k,n, TA, TB, (double)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

// Public entry point: forwards to the CPU implementation.
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}


//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------

// 1-bit XNOR: 1 when the bits agree, 0 otherwise.
static inline unsigned char xnor(unsigned char a, unsigned char b) {
    //return a == b;
    return !(a^b);
}

// INT-32
static inline uint32_t
get_bit_int32(uint32_t const*const src, size_t index) {
    // Extract bit `index` from a packed uint32 bitfield: word = index/32,
    // bit-within-word = index%32. Returns 0 or 1.
    size_t src_i = index / 32;
    int src_shift = index % 32;
    unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
    return val;
}

// bitwise XNOR of two 32-bit words (result bit = 1 where the inputs agree)
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~(a^b);
}

// bitwise XNOR of two 64-bit words
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~(a^b);
}

// broadcast a single 0/1 bit to all 32 bits
static inline uint32_t fill_bit_int32(char src) {
    if (src == 0) return 0x00000000;
    else return  0xFFFFFFFF;
}

// broadcast a single 0/1 bit to all 64 bits
static inline uint64_t fill_bit_int64(char src) {
    if (src == 0) return 0x0000000000000000;
    else return  0xFFFFFFFFFFFFFFFF;
}

// debug helper: print a 32-bit word LSB-first
void binary_int32_printf(uint32_t src) {
    int i;
    for (i = 0; i < 32; ++i) {
        if (src & 1) printf("1");
        else printf("0");
        src = src >> 1;
    }
    printf("\n");
}

// debug helper: print a 64-bit word LSB-first
void binary_int64_printf(uint64_t src) {
    int i;
    for (i = 0; i < 64; ++i) {
        if (src & 1) printf("1");
        else printf("0");
        src = src >> 1;
    }
    printf("\n");
}

/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int *count_arr = calloc(M*N, sizeof(int));

    int i, j, k;
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        for (k = 0; k < K; ++k) {   // l.size*l.size*l.c - one filter size [27 - 9216]
            char a_bit = get_bit(A, i*lda + k);

            for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
                char b_bit = get_bit(B, k*ldb + j);
                count_arr[i*ldc + j] += xnor(a_bit, b_bit);
            }
        }
    }

    for (i = 0; i < M; ++i) {
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
        }
    }
    free(count_arr);
}
*/

/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int *count_arr = calloc(M*N, sizeof(int));

    int i, j, k;
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            for (k = 0; k < K;
++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); char b_bit = get_bit(B, j*ldb + k); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] const char a_bit = get_bit(A, i*lda + k); uint64_t a_bit64 = fill_bit_int64(a_bit); int k_ldb = k*ldb; for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056] if ((N - j > 64) && (k_ldb % 8 == 0)) { uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); //printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc printf("\n %d \n", __popcnt64(c_bit64)); // msvs int h; for (h = 0; h < 64; ++h) if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1; //binary_int64_printf(a_bit64); //binary_int64_printf(b_bit64); //binary_int64_printf(c_bit64); } else { for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k_ldb + j); if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1; } } } } } if (mean_arr) { //int K_2 = K / 2; for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; //float mean_val2 = 2 * mean_val; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; //C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2; } } } else { for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i*ldc + j] = count_arr[i*ldc + j] - K / 2; } } } free(count_arr); //getchar(); } */ /* void 
gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); #ifdef WIN32 int tmp_count = __popcnt64(c_bit64); #else int tmp_count = __builtin_popcountll(c_bit64); #endif if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } C[i*ldc + j] = (2 * count - K) * mean_val; } } } */ //---------------------------- // is not used void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb) { unsigned int x, y; for (y = 0; y < 32; ++y) { for (x = 0; x < 32; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y; } } } #ifndef GPU uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = 
A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb) { unsigned x, y; for (y = 0; y < 8; ++y) { for (x = 0; x < 8; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y; } } } unsigned char reverse_byte_1(char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } unsigned char reverse_byte(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } static unsigned char lookup[16] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, }; unsigned char reverse_byte_3(unsigned char n) { // Reverse the top and bottom nibble then swap them. return (lookup[n & 0b1111] << 4) | lookup[n >> 4]; } void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n) { unsigned x, y, t; x = y = 0; // Load the array and pack it into x and y. 
//x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; //y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x); B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y); } /* // transpose by 8-bit void transpose_bin(char *A, char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda); int i; #pragma omp parallel for for (i = 0; i < n; i += 8) { int j; for (j = 0; j < m; j += 8) { int a_index = i*lda + j; int b_index = j*ldb + i; //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8); transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } */ #endif // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32); //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32); int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for 
(; j < m; ++j) { if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i); } } } static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = __popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } //---------------------------- #if (defined(__AVX__) && defined(__x86_64__)) || defined(_WIN64) #ifdef _WIN64 #include <intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 _mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux GCC/Clang #include <x86intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <cpuid.h> static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index)); } void asm_cpuid(uint32_t* abcd, uint32_t eax) { uint32_t ebx = 0, edx = 0, ecx = 0; // EBX is saved to EDI and later restored __asm__("movl %%ebx, %%edi;" "cpuid;" "xchgl %%ebx, %%edi;" : "=D"(ebx), "+a"(eax), "+c"(ecx), "=d"(edx)); abcd[0] = eax; abcd[1] = ebx; abcd[2] = ecx; abcd[3] = edx; } #endif #ifdef _WIN32 // Windows #define cpuid(info, x) __cpuidex(info, x, 0) #else // GCC Intrinsics void cpuid(int info[4], int InfoType) { __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); } #endif // Misc. 
// CPU feature flags, filled once by check_cpu_features().
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM;      // Advanced Bit Manipulation

// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;

// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;

// SIMD: 512-bit
static int HW_AVX512F;    // AVX512 Foundation
static int HW_AVX512CD;   // AVX512 Conflict Detection
static int HW_AVX512PF;   // AVX512 Prefetch
static int HW_AVX512ER;   // AVX512 Exponential + Reciprocal
static int HW_AVX512VL;   // AVX512 Vector Length Extensions
static int HW_AVX512BW;   // AVX512 Byte + Word
static int HW_AVX512DQ;   // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions

// Populate the HW_* flags from CPUID leaves 1, 7 and 0x80000001.
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
// NOTE(review): not thread-safe (plain static globals), and
// `(int)1 << 31` for AVX512VL shifts into the sign bit — works in practice
// but is formally implementation-defined; 1u << 31 would be cleaner.
void check_cpu_features(void) {
    int info[4];
    cpuid(info, 0);
    int nIds = info[0];

    cpuid(info, 0x80000000);
    unsigned nExIds = info[0];

    // Detect Features
    if (nIds >= 0x00000001) {
        cpuid(info, 0x00000001);
        HW_MMX = (info[3] & ((int)1 << 23)) != 0;
        HW_SSE = (info[3] & ((int)1 << 25)) != 0;
        HW_SSE2 = (info[3] & ((int)1 << 26)) != 0;
        HW_SSE3 = (info[2] & ((int)1 << 0)) != 0;
        HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0;
        HW_SSE41 = (info[2] & ((int)1 << 19)) != 0;
        HW_SSE42 = (info[2] & ((int)1 << 20)) != 0;
        HW_AES = (info[2] & ((int)1 << 25)) != 0;
        HW_AVX = (info[2] & ((int)1 << 28)) != 0;
        HW_FMA3 = (info[2] & ((int)1 << 12)) != 0;
        HW_RDRAND = (info[2] & ((int)1 << 30)) != 0;
    }
    if (nIds >= 0x00000007) {
        cpuid(info, 0x00000007);
        HW_AVX2 = (info[1] & ((int)1 << 5)) != 0;
        HW_BMI1 = (info[1] & ((int)1 << 3)) != 0;
        HW_BMI2 = (info[1] & ((int)1 << 8)) != 0;
        HW_ADX = (info[1] & ((int)1 << 19)) != 0;
        HW_SHA = (info[1] & ((int)1 << 29)) != 0;
        HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0;
        HW_AVX512F = (info[1] & ((int)1 << 16)) != 0;
        HW_AVX512CD = (info[1] & ((int)1 << 28)) != 0;
        HW_AVX512PF = (info[1] & ((int)1 << 26)) != 0;
        HW_AVX512ER = (info[1] & ((int)1 << 27)) != 0;
        HW_AVX512VL = (info[1] & ((int)1 << 31)) != 0;
        HW_AVX512BW = (info[1] & ((int)1 << 30)) != 0;
        HW_AVX512DQ = (info[1] & ((int)1 << 17)) != 0;
        HW_AVX512IFMA = (info[1] & ((int)1 << 21)) != 0;
        HW_AVX512VBMI = (info[2] & ((int)1 << 1)) != 0;
    }
    if (nExIds >= 0x80000001) {
        cpuid(info, 0x80000001);
        HW_x64 = (info[3] & ((int)1 << 29)) != 0;
        HW_ABM = (info[2] & ((int)1 << 5)) != 0;
        HW_SSE4a = (info[2] & ((int)1 << 6)) != 0;
        HW_FMA4 = (info[2] & ((int)1 << 16)) != 0;
        HW_XOP = (info[2] & ((int)1 << 11)) != 0;
    }
}

// Lazily detect AVX support; prints the decision once. Returns 0 or 1.
int is_avx() {
    static int result = -1;
    if (result == -1) {
        check_cpu_features();
        result = HW_AVX;
        if (result == 1) printf(" Used AVX \n");
        else printf(" Not used AVX \n");
    }
    return result;
}

// Lazily detect FMA3 + AVX2 support; prints the decision once. Returns 0 or 1.
int is_fma_avx2() {
    static int result = -1;
    if (result == -1) {
        check_cpu_features();
        result = HW_FMA3 && HW_AVX2;
        if (result == 1) printf(" Used FMA & AVX2 \n");
        else printf(" Not used FMA & AVX2 \n");
    }
    return result;
}

// C += ALPHA * A(MxK) * B(KxN), row-major with leading dimensions lda/ldb/ldc.
// Runtime-dispatches between an AVX inner loop (8 floats at a time) and a
// plain scalar triple loop.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    if (is_avx() == 1) {    // AVX
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                float A_PART = ALPHA*A[i*lda + k];
                __m256 a256, b256, c256, result256;    // AVX
                a256 = _mm256_set1_ps(A_PART);
                for (j = 0; j < N - 8; j += 8) {
                    b256 = _mm256_loadu_ps(&B[k*ldb + j]);
                    c256 = _mm256_loadu_ps(&C[i*ldc + j]);
                    // mul+add instead of _mm256_fmadd_ps so the kernel also
                    // runs on AVX-only CPUs (FMA: Haswell 2013 / Piledriver 2012).
                    result256 = _mm256_mul_ps(a256, b256);
                    result256 = _mm256_add_ps(result256, c256);
                    _mm256_storeu_ps(&C[i*ldc + j], result256);
                }

                // Scalar tail: restart at the last full (or partial) 8-lane boundary.
                int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
                for (j = prev_end; j < N; ++j)
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
    else {
        // Scalar fallback (an SSE variant used to be here, commented out).
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
            }
        }
    }
}

// Cache-blocked AVX GEMM: C += ALPHA * A * B, tiled by TILE_M x TILE_N x TILE_K
// (project macros). The inner kernel keeps a 4x16 block of C in eight __m256
// registers; scalar loops handle the N, K and M remainders.
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
    {
        int j, k;
        int i_d, k_d;

        for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
        {
            for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
            {
                // Tile sizes chosen with the cache hierarchy in mind:
                // L1 32 KB, L2 256 KB, L3 8 MB (original author's notes).
                __m256 result256;
                __m256 a256_0, b256_0;    // AVX
                __m256 a256_1, b256_1;    // AVX
                __m256 a256_2;// , b256_2;        // AVX
                __m256 a256_3;// , b256_3;        // AVX
                __m256 c256_0, c256_1, c256_2, c256_3;
                __m256 c256_4, c256_5, c256_6, c256_7;

                // Load the 4x16 output block (rows i..i+3, cols j..j+15).
                c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
                c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
                c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
                c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);

                c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
                c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
                c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
                c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);

                for (k_d = 0; k_d < (TILE_K); ++k_d)
                {
                    a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
                    a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
                    a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
                    a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);

                    b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
                    b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);

                    // mul+add kept separate (FMA variants commented out upstream
                    // to stay AVX-only compatible).
                    result256 = _mm256_mul_ps(a256_0, b256_0);
                    c256_0 = _mm256_add_ps(result256, c256_0);
                    result256 = _mm256_mul_ps(a256_1, b256_0);
                    c256_1 = _mm256_add_ps(result256, c256_1);
                    result256 = _mm256_mul_ps(a256_0, b256_1);
                    c256_2 = _mm256_add_ps(result256, c256_2);
                    result256 = _mm256_mul_ps(a256_1, b256_1);
                    c256_3 = _mm256_add_ps(result256, c256_3);

                    result256 = _mm256_mul_ps(a256_2, b256_0);
                    c256_4 = _mm256_add_ps(result256, c256_4);
                    result256 = _mm256_mul_ps(a256_3, b256_0);
                    c256_5 = _mm256_add_ps(result256, c256_5);
                    result256 = _mm256_mul_ps(a256_2, b256_1);
                    c256_6 = _mm256_add_ps(result256, c256_6);
                    result256 = _mm256_mul_ps(a256_3, b256_1);
                    c256_7 = _mm256_add_ps(result256, c256_7);
                }
                _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
                _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);

                _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
                _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
            }

            // N remainder for this (i, k) tile: scalar.
            for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
                for (i_d = i; i_d < (i + TILE_M); ++i_d)
                {
                    for (k_d = k; k_d < (k + TILE_K); ++k_d)
                    {
                        PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
                        C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
                    }
                }
            }
        }

        // K remainder for this row band: scalar over all of N.
        for (k = (K / TILE_K)*TILE_K; k < K; ++k)
        {
            for (i_d = i; i_d < (i + TILE_M); ++i_d)
            {
                PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
                for (j = 0; j < N; ++j)
                {
                    C[i_d*ldc + j] += A_PART*B[k*ldb + j];
                }
            }
        }
    }

    // M remainder: plain scalar GEMM for the last M % TILE_M rows.
    for (i = (M / TILE_M)*TILE_M; i < M; ++i)
    {
        int j, k;
        for (k = 0; k < K; ++k)
        {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j)
            {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

// Binary GEMM on 32-bit-packed rows: for each output cell, XNOR one packed
// word of A against 8 packed words of B, popcount each word, and accumulate
// (2*popcount - 32) * mean_arr[i] into C. K counts packed 32-bit words.
// NOTE(review): the AVX path casts &B[...] and &C[...] directly to
// __m256i*/__m256* — presumably those buffers are 32-byte aligned; confirm.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i)
    {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        for (s = 0; s < K; ++s)  // l.size*l.size*l.c/32
        {
            PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
            __m256i a256 = _mm256_set1_epi32(A_PART);

            for (j = 0; j < N - 8; j += 8)
            {
                __m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256);       // xor(a,b)
                __m256i all_1 = _mm256_set1_epi8((char)255);
                __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))

                // Per-lane popcount via scalar popcnt_32; could use
                // _mm512_popcnt_epi32 (AVX512-VPOPCNTDQ) when available.
                __m256 count = _mm256_setr_ps(
                    popcnt_32(_mm256_extract_epi32(xnor256, 0)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 1)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 2)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 3)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 4)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 5)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 6)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 7)));

                __m256 val2 = _mm256_set1_ps(2);
                count = _mm256_mul_ps(count, val2);     // count * 2

                __m256 val32 = _mm256_set1_ps(32);
                count = _mm256_sub_ps(count, val32);    // count - 32

                __m256 mean256 = _mm256_set1_ps(mean_val);
                count = _mm256_mul_ps(count, mean256);  // count * mean_val

                __m256 c256 = *((__m256*)&C[i*ldc + j]);
                count = _mm256_add_ps(count, c256);     // c = c + count
                *((__m256*)&C[i*ldc + j]) = count;
            }

            for (; j < N; ++j)  // scalar tail
            {
                PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result); // must be Signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}

// Reference scalar 2D convolution (stride 1 semantics in the indexing):
// output[fil][y][x] += sum over (chan, f_y, f_x) of input * weights, with
// zero padding handled by skipping out-of-range taps. `stride` is accepted
// but not applied in the index math.
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output)
{
    int fil;  // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        for (chan = 0; chan < c; ++chan)
            for (y = 0; y < h; ++y)
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // output[filters][h][w] += input[ch][h][w] * weights[fil][ch][ky][kx]
                    output[output_index] += sum;
                }
    }
}

// Experimental binary-style AVX convolution. First forces the sign bit on in
// every weight (making all weights negative via the 0x80000000 mask), then for
// each output pixel accumulates XOR(weight, input) across the receptive field,
// scales by |mean[fil]| and OVERWRITES output (does not accumulate).
// NOTE(review): looks like a work-in-progress sign-based approximation —
// x is only iterated to w-8, horizontal edge taps are not range-checked, and
// &input[...] / &output[...] are cast to __m256* (alignment assumed); confirm
// this path is only used experimentally.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    int i;

#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
    }
#endif

    // Set the IEEE-754 sign bit on every weight.
    __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    for (i = 0; i < ksize*ksize*n*c; i+=8) {
        *((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
    }

    // Kept although only referenced from commented-out experiments below.
    __m256i all256_last_zero = _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
    __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
    __m256 all256_one = _mm256_set1_ps(1);
    __m256i all256i_one = _mm256_set1_epi32(1);

    int fil;  // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // fabs() promotes to double here; cur_mean is non-negative.
        float cur_mean = fabs(mean[fil]);
        __m256 mean256 = _mm256_set1_ps(cur_mean);
        for (y = 0; y < h; ++y)
            for (x = 0; x < w-8; x+=8)
            {
                int const output_index = fil*w*h + y*w + x;
                float sum = 0;  // kept (unused in this vector path)
                __m256 sum256 = _mm256_set1_ps(0);

                for (chan = 0; chan < c; ++chan)
                {
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;

                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        if (input_y < 0 || input_y >= h) continue;  // vertical zero-pad

                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            __m256 in = *((__m256*)&input[input_index]);
                            __m256 w = _mm256_set1_ps(weights[weights_index]);
                            // XOR of sign-masked weight with input flips the
                            // input's sign where the weight's sign bit is set.
                            __m256 xor256 = _mm256_xor_ps(w, in);
                            sum256 = _mm256_add_ps(xor256, sum256);

                            if (f_x < ksize-1) {
                                // (permute/mask experiment commented out upstream)
                            }
                        }
                    }
                }

                sum256 = _mm256_mul_ps(sum256, mean256);
                // Overwrites rather than accumulates (the += form is
                // commented out upstream).
                *((__m256*)&output[output_index]) = sum256;
            }
    }
}

// 128-bit popcount: two 64-bit hardware popcounts.
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#ifdef _MSC_VER
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}

// 256-bit popcount via two popcnt128 calls.
static inline int popcnt256(__m256i n) {
    return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}

// Mula's algorithm, part 1: per-byte popcount via nibble lookup
// (_mm256_shuffle_epi8), then horizontal-sum bytes into four 64-bit lanes
// with _mm256_sad_epu8.
static inline __m256i count256(__m256i v) {
    __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4);
    __m256i low_mask = _mm256_set1_epi8(0x0f);
    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);
    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}

// 256-bit popcount built on count256 (sum of the four 64-bit lanes).
static inline int popcnt256_custom(__m256i n) {
    __m256i val = count256(n);
    return _mm256_extract_epi64(val, 0)
        + _mm256_extract_epi64(val, 1)
        + _mm256_extract_epi64(val, 2)
        + _mm256_extract_epi64(val, 3);
}

// XNOR two 256-bit words and accumulate their per-lane popcounts into
// *count_sum (Mula part 1). The NOT of the weights could be hoisted out of
// the inner loop as a further optimization (original author's note).
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
    __m256i c_bit256 = _mm256_set1_epi8((char)255);
    __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256);
    c_bit256 = _mm256_andnot_si256(xor256, c_bit256);   // xnor = not(xor(a,b))
    *count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum);
}

// Mula's algorithm, part 2: reduce the four 64-bit lane sums to one int.
static inline int get_count_mula(__m256i count_sum) {
    return _mm256_extract_epi64(count_sum, 0)
        + _mm256_extract_epi64(count_sum, 1)
        + _mm256_extract_epi64(count_sum, 2)
        + _mm256_extract_epi64(count_sum, 3);
}

// Binary GEMM with B pre-transposed: C[i][j] = (2*popcnt(xnor(A_row_i, B_row_j)) - K) * mean_arr[i].
// A and B hold bit-packed rows (lda/ldb in bits, 8 bits per byte); K is the
// row length in bits, processed 256 bits per iteration. Rows are processed in
// 2x2 blocks so each 256-bit load of A/B is reused twice; M, N and K
// remainders are handled by trailing loops. Padding bits beyond K are assumed
// to XNOR to 1 and are subtracted via f1.
// ~5x faster than the float32 gemm() (original author's measurement).
// ALPHA_UNUSED is kept for signature compatibility.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
    }
#endif

    #pragma omp parallel for
    for (i = 0; i < (M/2)*2; i += 2)
    {   // l.n - filters [16 - 55 - 1024]
        float mean_val_0 = mean_arr[i + 0];
        float mean_val_1 = mean_arr[i + 1];
        int j, k;

        for (j = 0; j < (N/2)*2; j += 2)
        {   // out_h*out_w - one channel output size [169 - 173056]
            const int bit_step = 256;
            __m256i count_sum_0 = _mm256_set1_epi8(0);
            __m256i count_sum_1 = _mm256_set1_epi8(0);
            __m256i count_sum_2 = _mm256_set1_epi8(0);
            __m256i count_sum_3 = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step)
            {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
                __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));

                // 2x2 cross products of the loaded rows.
                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
                xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
                xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
                xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
            }
            int count_0 = get_count_mula(count_sum_0);
            int count_1 = get_count_mula(count_sum_1);
            int count_2 = get_count_mula(count_sum_2);
            int count_3 = get_count_mula(count_sum_3);

            // remove extra bits (from empty space for align only)
            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count_0 = count_0 - f1;
            count_1 = count_1 - f1;
            count_2 = count_2 - f1;
            count_3 = count_3 - f1;
            C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
            C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
            C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
            C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
        }

        // N remainder (odd N): one column at a time for both rows of the pair.
        int i_d;
        for (i_d = 0; i_d < 2; ++i_d)
        {
            float mean_val = mean_arr[i + i_d];
            for (j = (N / 2) * 2; j < N; j += 1)
            {
                const int bit_step = 256;
                __m256i count_sum = _mm256_set1_epi8(0);

                for (k = 0; k < K; k += bit_step) {
                    __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
                    __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                    xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
                }
                int count = get_count_mula(count_sum);

                const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                count = count - f1; // remove extra bits
                C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
            }
        }
    }

    // M remainder (odd M): last row, all columns.
    for (i = (M / 2) * 2; i < M; i += 1)
    {
        float mean_val = mean_arr[i];
        int j, k;
        for (j = 0; j < N; j += 1)
        {
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step) {
                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
            }
            int count = get_count_mula(count_sum);

            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count = count - f1; // remove extra bits
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col producing a TRANSPOSED, row-aligned matrix: element (c, h, w) is
// written to data_col[(h*width_col + w)*ldb_align + c] instead of the usual
// (c*height_col + h)*width_col + w. The stride==1/pad==1 fast path copies the
// interior with AVX loads and fixes up the four borders with
// im2col_get_pixel; the general path is fully scalar.
// NOTE(review): the vector loop bound `w < width_col - pad - 4` with step 8
// lets the last iteration write 8 transposed rows starting up to 4 short of
// the bound — presumably ldb_align padding absorbs this; confirm.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 4; w+=8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned

                    // Load 8 input pixels, scatter them down 8 consecutive
                    // transposed rows (stride ldb_align).
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);
                    data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);
                    data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);
                    data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);
                    data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);
                    data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);
                    data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);
                    data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {   // left border column
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // right border column
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // top border row
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // bottom border row
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // General (non-optimized) path: any stride/pad.
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = 0; h < height_col; ++h) {
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h * stride;
                    int im_col = w_offset + w * stride;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Standard-layout im2col with an AVX fast path for the common
// stride==1/pad==1 case (and FMA/AVX2 CPUs): the interior is copied with
// 8-wide vector loads/stores, the four borders with im2col_get_pixel.
// Any other configuration falls back to the generic im2col_cpu().
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col-pad; ++h) {
                for (w = pad; w < width_col-pad-8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {   // left border column
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // right border column
                w = width_col-1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // top border row
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {   // bottom border row
                h = height_col-1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // Fallback: generic im2col.
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col variant that writes each of the channels*ksize*ksize output rows with
// an aligned leading dimension of bit_align floats (instead of the dense
// height_col*width_col), so later float_to_bit / transpose_bin steps can work
// on padded, aligned rows.
// NOTE(review): only the height_col==height / stride==1 / pad==1 fast path is
// implemented; the else branch prints an error and produces NO output —
// callers presumably guarantee the fast-path conditions; verify at call sites.
void im2col_cpu_custom_align(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        int new_ldb = bit_align;    // padded row stride (in floats) of data_col

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;             // kernel-element x offset
            int h_offset = (c / ksize) % ksize;   // kernel-element y offset
            int c_im = c / ksize / ksize;         // source image channel
            // interior pixels (padding never involved): direct copy, 8 floats at a time
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;    // aligned layout

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // image borders go through im2col_get_pixel(), which handles padding
            {
                w = 0;    // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                w = width_col - 1;    // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = 0;    // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = height_col - 1;    // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
        // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Binary im2col: same aligned layout as im2col_cpu_custom_align(), but each
// output element is a single bit (1 iff the source value > 0), packed into
// data_col with each row starting at bit offset c * bit_align.
// NOTE(review): bits are only ever set/OR'ed, never cleared — data_col is
// presumably zero-initialized by the caller; verify.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        // unused: leftover from the commented-out sign-bit variant below
        __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000,
            0x80000000, 0x80000000, 0x80000000, 0x80000000);
        __m256 float_zero256 = _mm256_set1_ps(0.00);

        int new_ldb = bit_align;    // padded row stride of data_col, in bits

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;             // kernel-element x offset
            int h_offset = (c / ksize) % ksize;   // kernel-element y offset
            int c_im = c / ksize / ksize;         // source image channel
            // interior pixels: compare 8 floats against 0, pack the 8-bit mask
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;    // bit index

                    //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
                    //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
                    //mask = ~mask;   // inverse mask,  (val >= 0) ? 1 : 0

                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
                    uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1

                    // Unaligned 16-bit read-modify-write of the word holding bits
                    // [col_index, col_index+7].
                    // NOTE(review): this RMW also touches the byte after the 8
                    // target bits; under the parallel-for it is safe only if the
                    // bit_align row padding keeps concurrently written rows from
                    // sharing that byte — verify bit_align granularity.
                    uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
                    *dst_ptr |= (mask << (col_index % 8));
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            // image borders go through im2col_get_pixel(), which handles padding
            {
                w = 0;    // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                w = width_col - 1;    // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                h = 0;    // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                h = height_col - 1;    // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
        // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}

// Applies activation a to x[0..n) in place. LINEAR is a no-op; LEAKY has an
// AVX2 fast path (slope fixed at 0.1); anything else falls back to the scalar
// activate() helper.
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR) {}    // identity: nothing to do
    else if (a == LEAKY) {
        if (is_fma_avx2()) {
            __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000,
                0x80000000, 0x80000000, 0x80000000, 0x80000000);
            __m256 all256_01 = _mm256_set1_ps(0.1F);

            // vector body stops at most 8 short of n; the scalar loop below
            // finishes the remainder
            for (i = 0; i < n - 8; i += 8) {
                //x[i] = (x[i]>0) ? x[i] : .1*x[i];
                __m256 src256 = _mm256_loadu_ps(&x[i]);
                __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
                __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
                // sign bit set (negative lane) -> take 0.1*x, else keep x
                __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult;
                _mm256_storeu_ps(&x[i], result256);
            }
        }
        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

// Packs size floats into bits of dst: bit i = (src[i] > 0). dst is cleared
// first, then each group of 8 floats becomes one byte.
// NOTE(review): the loop advances in steps of 8 even for a trailing partial
// group, loading up to 7 floats past the end of src when size % 8 != 0 —
// presumably the buffers are padded; verify allocation sizes at call sites.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    __m256 float_zero256 = _mm256_set1_ps(0.0);

    for (i = 0; i < size; i+=8)
    {
        //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
        //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
        //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
        ////mask = ~mask;   // inverse mask,  (val >= 0) ? 1 : 0

        __m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
        __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
        uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1
        // only the low 8 bits of mask can be set (8 lanes), so the byte store is lossless
        dst[i / 8] = mask;
    }
}

// Transposes one 4x4 float tile from A (row stride lda) into B (row stride ldb).
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
    __m128 row1 = _mm_loadu_ps(&A[0 * lda]);
    __m128 row2 = _mm_loadu_ps(&A[1 * lda]);
    __m128 row3 = _mm_loadu_ps(&A[2 * lda]);
    __m128 row4 = _mm_loadu_ps(&A[3 * lda]);
    _MM_TRANSPOSE4_PS(row1, row2, row3, row4);
    _mm_storeu_ps(&B[0 * ldb], row1);
    _mm_storeu_ps(&B[1 * ldb], row2);
    _mm_storeu_ps(&B[2 * ldb], row3);
    _mm_storeu_ps(&B[3 * ldb], row4);
}

// Cache-blocked out-of-place transpose of the n x m matrix A into B.
// Full interior blocks use the SSE 4x4 kernel (assumes block_size is a
// multiple of 4 — TODO confirm at call sites); blocks reaching the right or
// bottom edge fall back to a scalar element copy.
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        //int max_i2 = (i + block_size < n) ? (i + block_size) : n;
        if (i + block_size < n) {    // full-height block row
            int max_i2 = i + block_size;
            for (j = 0; j < m; j += block_size) {
                //int max_j2 = (j + block_size < m) ? (j + block_size) : m;
                if (j + block_size < m) {    // full interior block: SSE 4x4 tiles
                    int max_j2 = j + block_size;
                    for (i2 = i; i2 < max_i2; i2 += 4) {
                        for (j2 = j; j2 < max_j2; j2 += 4) {
                            transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
                        }
                    }
                }
                else {    // right edge: scalar copy
                    for (i2 = i; i2 < max_i2; ++i2) {
                        for (j2 = j; j2 < m; ++j2) {
                            B[j2*ldb + i2] = A[i2*lda + j2];
                        }
                    }
                }
            }
        }
        else {    // bottom edge: scalar copy of the remaining rows
            for (i2 = i; i2 < n; ++i2) {
                for (j2 = 0; j2 < m; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}

// Max-pooling forward pass over an NCHW tensor (src -> dst, window size x size).
// Two vector fast paths: generic stride==1 (8 outputs per step) and the common
// size==2 / stride==2 case (4 outputs per step); the scalar loop finishes each
// row and handles every other configuration.
// NOTE(review): the vector paths write only dst, not indexes[] — the argmax
// indices for those outputs keep whatever the buffer held before; presumably
// acceptable for inference only, confirm before relying on indexes.
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                //for (j = 0; j < out_w; ++j)
                j = 0;

                if(stride == 1 && is_avx() == 1) {
                    // bound keeps the 8-wide loads inside the row for every window column
                    for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        __m256 max256 = _mm256_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            for (m = 0; m < size; ++m) {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                max256 = _mm256_max_ps(src256, max256);
                            }
                        }
                        _mm256_storeu_ps(&dst[out_index], max256);
                    }
                }
                else if (size == 2 && stride == 2 && is_avx() == 1) {
                    for (j = 0; j < out_w - 4; j += 4) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        //float max = -FLT_MAX;
                        //int max_i = -1;
                        __m128 max128 = _mm_set1_ps(-FLT_MAX);

                        for (n = 0; n < size; ++n) {
                            //for (m = 0; m < size; ++m)
                            m = 0;
                            {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                // load 8 consecutive floats, pairwise-max, then
                                // compress the 4 pair maxima into one __m128
                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
                                __m256 max256 = _mm256_max_ps(src256, src256_2);

                                __m128 src128_0 = _mm256_extractf128_ps(max256, 0);
                                __m128 src128_1 = _mm256_extractf128_ps(max256, 1);
                                __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));

                                max128 = _mm_max_ps(src128, max128);
                            }
                        }
                        _mm_storeu_ps(&dst[out_index], max128);
                    }
                }

                // scalar remainder (also the only path that records indexes[])
                for (; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}

#else   // AVX

// Scalar fallbacks used when this translation unit is built without AVX.
int is_avx() { return 0; }
int is_fma_avx2() { return 0; }

// C[M x N] += ALPHA * A[M x K] * B[K x N]  (row-major, leading dims lda/ldb/ldc).
// i-k-j loop order keeps the inner-loop accesses to B and C sequential.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

// Same contract as gemm_nn(), with the row loop parallelized via OpenMP
// (each thread owns disjoint rows of C, so no synchronization is needed).
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

// XNOR-net GEMM on 32-bit packed binary operands: each uint32 packs 32 binary
// values. popcount(XNOR) counts matching bits per chunk; (2*count - 32) maps
// that to a signed +/-1 dot product, scaled by the per-filter mean
// mean_arr[i] and accumulated into C. ALPHA is not used in this body.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32  or (l.size*l.size*l.c)
        {
            //PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
            PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
            for (j = 0; j < N; ++j) // out_h*out_w;
            {
                //c[i*n + j] += A_PART*b[s*n + j];
                PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                //printf(" xnor_result = %d, ", xnor_result);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
                //c[i*n + j] += count*mean;
            }
        }
    }
}

// Naive direct 2-D convolution: for every filter, accumulates into
// output[fil][y][x] the correlation of input with weights; out-of-bounds taps
// are skipped (zero padding). Filters are parallelized with OpenMP.
// NOTE(review): out_h/out_w are computed but the loops iterate over the input
// h/w, so this matches only the stride==1 "same"-size case the comments
// describe; the mean parameter is not referenced in this body.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    //int i, f, j;

    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            // zero padding: skip taps that fall outside the image
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

// Portable 64-bit population count: MSVC or GCC/Clang intrinsic, split into
// two 32-bit counts on 32-bit targets.
static inline int popcnt_64(uint64_t val64)
{
#ifdef WIN32  // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else         // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else   // Linux
#if defined(__x86_64__) || defined(__aarch64__)  // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else  // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
    return tmp_count;
}

// Binary GEMM with bit-packed operands and B stored transposed: row i of A is
// a binarized filter, row j of B is one output position (lda/ldb are bit
// strides). Works 64 bits per step via XNOR + popcount; the final partial
// chunk subtracts the always-matching padding bits. The +/-1 dot product
// (2*count - K) is scaled by the filter mean. ALPHA_UNUSED is ignored.
// NOTE(review): rows must be padded so the 64-bit loads past K stay inside
// the buffers — presumably guaranteed by the bit-aligned packing; verify.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k;
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));    // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

// Stub: the aligned-transpose im2col exists only in the AVX build.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); return; int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { 
h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 1) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; 
//int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // 
transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i; if (a == LINEAR) { } else if (a == LEAKY) { for (i = 0; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; char* byte_arr = (char*)calloc(size, sizeof(char)); for (i = 0; i < size; ++i) { if (src[i] > 0) byte_arr[i] = 1; } //for (i = 0; i < size; ++i) { // dst[i / 8] |= byte_arr[i] << (i % 8); //} for (i = 0; i < size; i += 8) { char dst_tmp = 0; dst_tmp |= byte_arr[i + 0] << 0; dst_tmp |= byte_arr[i + 1] << 1; dst_tmp |= byte_arr[i + 2] << 2; dst_tmp |= byte_arr[i + 3] << 3; dst_tmp |= byte_arr[i + 4] << 4; dst_tmp |= byte_arr[i + 5] << 5; dst_tmp |= byte_arr[i + 6] << 6; dst_tmp |= byte_arr[i + 7] << 7; dst[i / 8] = dst_tmp; } free(byte_arr); } static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size) { int i; //#pragma omp parallel for for (i = 0; i<block_size; i++) { int j; for (j = 0; j<block_size; j++) { B[j*ldb + i] = A[i*lda + j]; } } } void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += block_size) { int j, i2, j2; for (j = 0; j < m; j += block_size) { int max_i2 = i + block_size < n ? i + block_size : n; int max_j2 = j + block_size < m ? 
j + block_size : m; for (i2 = i; i2 < max_i2; ++i2) { for (j2 = j; j2 < max_j2; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { int b, k; const int w_offset = -pad / 2; const int h_offset = -pad / 2; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { for (j = 0; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); float val = (valid != 0) ? src[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } dst[out_index] = max; indexes[out_index] = max_i; } } } } } #endif // AVX // 32 channels -> 1 channel (with 32 floats) // 256 channels -> 8 channels (with 32 floats) void repack_input(float *input, float *re_packed_input, int w, int h, int c) { const int items_per_channel = w * h; int chan, i; for (chan = 0; chan < c; chan += 32) { for (i = 0; i < items_per_channel; ++i) { int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float src = input[(chan + c_pack)*items_per_channel + i]; re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src; } } } } void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align) { //l.bit_align - algined (n) by 32 //new_ldb - aligned (k) by 256 int i; //#pragma omp parallel for for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c; { int j; for (j = 0; j < src_w; j += 1) // out_h*out_w; { ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j]; } } } void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float 
ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) // out_h*out_w; { float val = 0; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s]; PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int val += (2 * count - 32) * mean_val; } C[i*ldc + j] += val; } } } void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr) { int fil; // filter index #pragma omp parallel for for (fil = 0; fil < n; ++fil) { float mean_val = mean_arr[fil]; int chan, y, x, f_y, f_x; // c_pack // channel index for (chan = 0; chan < c / 32; ++chan) //for (chan = 0; chan < l.c; chan += 32) //for (c_pack = 0; c_pack < 32; ++c_pack) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; float sum = 0; // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; // normal //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x]; //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x]; // packed //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //sum += input * weight; //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + 
chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //uint32_t bit1 = input > 0; //uint32_t bit2 = weight > 0; //uint32_t count = (~(bit1 ^ bit2)) & 1; //float result = (2 * (float)count - 1) * mean_val; //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result); //sum += result; uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x]; //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x]; uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x]; uint32_t xnor_result = ~(input ^ weight); int32_t count = popcnt_32(xnor_result); // mandatory Signed int sum += (2 * count - 32) * mean_val; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d 
%f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); if (BETA != 1){ #pragma omp parallel for collapse(2) for(int i = 0; i < M; ++i){ for(int j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } } is_avx(); // initialize static variable if (is_fma_avx2() && !TA && !TB) { gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); } else { #pragma omp parallel for for(int t = 0; t < M; ++t) { if (!TA && !TB) gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else if (TA && !TB) gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); else if (!TA && TB) gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); } } } #ifdef GPU #include <math.h> void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream()); CHECK_CUDA(stream_status); cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); CHECK_CUDA(status); } void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M)); float *B_gpu = cuda_make_array(B, (TB ? 
ldb*N : ldb*K)); float *C_gpu = cuda_make_array(C, ldc*M); gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc); cuda_pull_array(C_gpu, C, ldc*M); cuda_free(A_gpu); cuda_free(B_gpu); cuda_free(C_gpu); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_ongpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaDeviceSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, 
m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,192,729,1600); time_ongpu(0,0,384,196,1728); time_ongpu(0,0,256,196,3456); time_ongpu(0,0,256,196,2304); time_ongpu(0,0,128,4096,12544); time_ongpu(0,0,128,4096,4096); */ time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,576,12544); time_ongpu(0,0,256,2304,784); time_ongpu(1,1,2304,256,784); time_ongpu(0,0,512,4608,196); time_ongpu(1,1,4608,512,196); return 0; } #endif